// duct-tape/src/thread.c
#include <darlingserver/duct-tape/stubs.h>
#include <darlingserver/duct-tape.h>
#include <darlingserver/duct-tape/task.h>
#include <darlingserver/duct-tape/thread.h>
#include <darlingserver/duct-tape/hooks.internal.h>
#include <darlingserver/duct-tape/log.h>
#include <darlingserver/duct-tape/psynch.h>

#include <kern/thread.h>
#include <kern/ipc_tt.h>
#include <kern/policy_internal.h>
#include <mach/thread_act.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <stdlib.h>

#include <rtsig.h>

#define LINUX_ENOSYS 38
#define LINUX_EFAULT 14

#define LINUX_SI_USER 0
#define LINUX_SI_KERNEL 0x80
#define LINUX_TRAP_HWBKPT 4

#define LINUX_SIGSEGV 11
#define LINUX_SIGBUS 7
#define LINUX_SIGILL 4
#define LINUX_SIGFPE 8
#define LINUX_SIGTRAP 5

// stub
uint32_t sched_mach_factor = 0;

// stub
const qos_policy_params_t thread_qos_policy_params;

// stub
int thread_max = CONFIG_THREAD_MAX;

kern_return_t thread_set_state(register thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t state_count);

kern_return_t thread_get_state(thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t* state_count);

dtape_thread_t* dtape_thread_create(dtape_task_t* task, uint64_t nsid, void* context) {
	dtape_thread_t* thread = malloc(sizeof(dtape_thread_t));
	if (!thread) {
		return NULL;
	}

	thread->context = context;
	thread->processing_signal = false;
	thread->name = NULL;
	thread->waiting_suspended = false;
	LIST_INIT(&thread->user_states);
	dtape_mutex_init(&thread->suspension_mutex);
	dtape_condvar_init(&thread->suspension_condvar);
	memset(&thread->xnu_thread, 0, sizeof(thread->xnu_thread));
	memset(&thread->kwe, 0, sizeof(thread->kwe));

	memset(&thread->default_state, 0, sizeof(thread->default_state));
	LIST_INSERT_HEAD(&thread->user_states, &thread->default_state, link);

	// this next section uses code adapted from XNU's thread_create_internal() in osfmk/kern/thread.c

	thread->xnu_thread.wait_result = THREAD_WAITING;
	thread->xnu_thread.options = THREAD_ABORTSAFE;
	thread->xnu_thread.state = TH_RUN;

	os_ref_init_count(&thread->xnu_thread.ref_count, NULL, 1);

	thread->xnu_thread.task = &task->xnu_task;

	thread_lock_init(&thread->xnu_thread);
	wake_lock_init(&thread->xnu_thread);

	lck_mtx_init(&thread->xnu_thread.mutex, LCK_GRP_NULL, LCK_ATTR_NULL);

	ipc_thread_init(&thread->xnu_thread, IPC_THREAD_INIT_NONE);

	task_lock(&task->xnu_task);

	task_reference_internal(&task->xnu_task);

	queue_enter(&task->xnu_task.threads, &thread->xnu_thread, thread_t, task_threads);
	task->xnu_task.thread_count++;

	os_atomic_inc(&task->xnu_task.active_thread_count, relaxed);

	thread->xnu_thread.active = true;

	thread->xnu_thread.turnstile = turnstile_alloc();

	task_unlock(&task->xnu_task);

	thread->xnu_thread.thread_id = nsid;

	thread->xnu_thread.map = task->xnu_task.map;

	timer_call_setup(&thread->xnu_thread.wait_timer, thread_timer_expire, &thread->xnu_thread);

	dtape_psynch_thread_init(thread);

	return thread;
};

void dtape_thread_destroy(dtape_thread_t* thread) {
	dtape_log_debug("%llu: thread being destroyed", thread->xnu_thread.thread_id);

	dtape_psynch_thread_destroy(thread);

	// this next section uses code adapted from XNU's thread_deallocate_complete() in osfmk/kern/thread.c

	ipc_thread_disable(&thread->xnu_thread);
	ipc_thread_terminate(&thread->xnu_thread);

	if (thread->xnu_thread.turnstile) {
		turnstile_deallocate(thread->xnu_thread.turnstile);
	}

	if (IPC_VOUCHER_NULL != thread->xnu_thread.ith_voucher) {
		ipc_voucher_release(thread->xnu_thread.ith_voucher);
	}

	thread_lock(&thread->xnu_thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->xnu_thread.wait_timer_is_set) {
		thread->xnu_thread.wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->xnu_thread.wait_timer)) {
			thread->xnu_thread.wait_timer_active--;
		}
	}

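	// busy-wait for any in-flight expiration to drain; thread_timer_expire() decrements wait_timer_active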
	while (thread->xnu_thread.wait_timer_active > 0);

	// pull the thread from any waitqs it might have been waiting on
	thread->xnu_thread.state |= TH_TERMINATE;
	thread->xnu_thread.state &= ~(TH_UNINT);
	clear_wait_internal(&thread->xnu_thread, THREAD_INTERRUPTED);

	thread_unlock(&thread->xnu_thread);

	lck_mtx_destroy(&thread->xnu_thread.mutex, LCK_GRP_NULL);

	// remove this thread from the task's thread list
	task_lock(thread->xnu_thread.task);
	queue_remove(&thread->xnu_thread.task->threads, &thread->xnu_thread, thread_t, task_threads);
	thread->xnu_thread.task->thread_count--;
	task_unlock(thread->xnu_thread.task);

	task_deallocate(thread->xnu_thread.task);

	dtape_hooks->thread_context_dispose(thread->context);

	free(thread);
};

void dtape_thread_entering(dtape_thread_t* thread) {
	// if the thread is entering, it cannot be waiting
	thread->xnu_thread.state &= ~(TH_WAIT | TH_UNINT);
	thread->xnu_thread.state |= TH_RUN;
	thread->xnu_thread.block_hint = kThreadWaitNone;
};

void dtape_thread_exiting(dtape_thread_t* thread) {
	thread->xnu_thread.state &= ~TH_RUN;
};

void dtape_thread_set_handles(dtape_thread_t* thread, uintptr_t pthread_handle, uintptr_t dispatch_qaddr) {
	thread_lock(&thread->xnu_thread);
	thread->pthread_handle = pthread_handle;
	thread->dispatch_qaddr = dispatch_qaddr;
	thread_unlock(&thread->xnu_thread);
};

dtape_thread_t* dtape_thread_for_port(uint32_t thread_port) {
	thread_t xnu_thread = port_name_to_thread(thread_port, PORT_TO_THREAD_NONE);
	if (!xnu_thread) {
		return NULL;
	}
	// port_name_to_thread returns a reference on the thread upon success.
	// because we cannot take a reference on the duct-taped thread owner,
	// this reference is meaningless. therefore, we drop it.
	// we entrust our caller with the responsibility of ensuring it remains alive.
	thread_deallocate(xnu_thread);
	return dtape_thread_for_xnu_thread(xnu_thread);
};

void* dtape_thread_context(dtape_thread_t* thread) {
	return thread->context;
};

int dtape_thread_load_state_from_user(dtape_thread_t* thread, uintptr_t thread_state_address, uintptr_t float_state_address) {
	dtape_task_t* task = dtape_task_for_thread(thread);

	if (task->architecture == dserver_rpc_architecture_x86_64) {
		x86_thread_state64_t tstate;
		x86_float_state64_t fstate;

		if (copyin(thread_state_address, &tstate, sizeof(tstate)) || copyin(float_state_address, &fstate, sizeof(fstate))) {
			return -LINUX_EFAULT;
		}

		thread_set_state(current_thread(), x86_THREAD_STATE64, (thread_state_t) &tstate, x86_THREAD_STATE64_COUNT);
		thread_set_state(current_thread(), x86_FLOAT_STATE64, (thread_state_t) &fstate, x86_FLOAT_STATE64_COUNT);
	} else if (task->architecture == dserver_rpc_architecture_i386) {
		x86_thread_state32_t tstate;
		x86_float_state32_t fstate;

		if (copyin(thread_state_address, &tstate, sizeof(tstate)) || copyin(float_state_address, &fstate, sizeof(fstate))) {
			return -LINUX_EFAULT;
		}

		thread_set_state(current_thread(), x86_THREAD_STATE32, (thread_state_t) &tstate, x86_THREAD_STATE32_COUNT);
		thread_set_state(current_thread(), x86_FLOAT_STATE32, (thread_state_t) &fstate, x86_FLOAT_STATE32_COUNT);
	} else {
		dtape_log_error("dtape_thread_load_state_from_user() unimplemented for architecture: %d", task->architecture);
		return -LINUX_ENOSYS;
	}

	return 0;
};

int dtape_thread_save_state_to_user(dtape_thread_t* thread, uintptr_t thread_state_address, uintptr_t float_state_address) {
	dtape_task_t* task = dtape_task_for_thread(thread);

	if (task->architecture == dserver_rpc_architecture_x86_64) {
		x86_thread_state64_t tstate;
		x86_float_state64_t fstate;
		mach_msg_type_number_t count;

		count = x86_THREAD_STATE64_COUNT;
		thread_get_state(current_thread(), x86_THREAD_STATE64, (thread_state_t) &tstate, &count);

		count = x86_FLOAT_STATE64_COUNT;
		thread_get_state(current_thread(), x86_FLOAT_STATE64, (thread_state_t) &fstate, &count);

		if (copyout(&tstate, thread_state_address, sizeof(tstate)) || copyout(&fstate, float_state_address, sizeof(fstate))) {
			return -LINUX_EFAULT;
		}
	} else if (task->architecture == dserver_rpc_architecture_i386) {
		x86_thread_state32_t tstate;
		x86_float_state32_t fstate;
		mach_msg_type_number_t count;

		count = x86_THREAD_STATE32_COUNT;
		thread_get_state(current_thread(), x86_THREAD_STATE32, (thread_state_t) &tstate, &count);

		count = x86_FLOAT_STATE32_COUNT;
		thread_get_state(current_thread(), x86_FLOAT_STATE32, (thread_state_t) &fstate, &count);

		if (copyout(&tstate, thread_state_address, sizeof(tstate)) || copyout(&fstate, float_state_address, sizeof(fstate))) {
			return -LINUX_EFAULT;
		}
	} else {
		dtape_log_error("dtape_thread_save_state_to_user() unimplemented for architecture: %d", task->architecture);
		return -LINUX_ENOSYS;
	}

	return 0;
};

void dtape_thread_process_signal(dtape_thread_t* thread, int bsd_signal_number, int linux_signal_number, int code, uintptr_t signal_address) {
	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = { 0, 0 };
	dtape_task_t* task = dtape_task_for_thread(thread);

	thread->processing_signal = true;

	if (code == LINUX_SI_USER) {
		if (task->has_sigexc) {
			codes[0] = EXC_SOFT_SIGNAL;
			codes[1] = bsd_signal_number;
			bsd_exception(EXC_SOFTWARE, codes, 2);
		} else {
			dtape_hooks->thread_set_pending_signal(thread->context, bsd_signal_number);
		}

		goto out;
	}

	int mach_exception = 0;
	switch (linux_signal_number) {
		case LINUX_SIGSEGV: // KERN_INVALID_ADDRESS
			mach_exception = EXC_BAD_ACCESS;
			codes[0] = KERN_INVALID_ADDRESS;
			codes[1] = signal_address;
			break;
		case LINUX_SIGBUS:
			mach_exception = EXC_BAD_ACCESS;
			codes[0] = EXC_I386_ALIGNFLT;
			break;
		case LINUX_SIGILL:
			mach_exception = EXC_BAD_INSTRUCTION;
			codes[0] = EXC_I386_INVOP;
			break;
		case LINUX_SIGFPE:
			mach_exception = EXC_ARITHMETIC;
			codes[0] = code;
			break;
		case LINUX_SIGTRAP:
			mach_exception = EXC_BREAKPOINT;
			codes[0] = (code == LINUX_SI_KERNEL) ? EXC_I386_BPT : EXC_I386_SGL;

			if (code == LINUX_TRAP_HWBKPT) {
#if 0
				codes[1] = thread->triggered_watchpoint_address;
#else
				dtape_stub("LINUX_TRAP_HWBKPT");
				codes[1] = 0;
#endif
			}
			break;
		/*
		case LINUX_SIGSYS:
			mach_exception = EXC_SOFTWARE;
			if (codes[0] == 0)
				codes[0] = EXC_UNIX_BAD_SYSCALL;
		case LINUX_SIGPIPE:
			mach_exception = EXC_SOFTWARE;
			if (codes[0] == 0)
				codes[0] = EXC_UNIX_BAD_PIPE;
		case LINUX_SIGABRT:
			mach_exception = EXC_SOFTWARE;
			if (codes[0] == 0)
				codes[0] = EXC_UNIX_ABORT;
		*/
		default:
			if (task->has_sigexc) {
				if (codes[0] == 0)
					codes[0] = EXC_SOFT_SIGNAL;
				codes[1] = bsd_signal_number;
				bsd_exception(EXC_SOFTWARE, codes, 2);
			} else {
				dtape_hooks->thread_set_pending_signal(thread->context, bsd_signal_number);
			}
			goto out;
	}

	dtape_log_debug("calling exception_triage_thread(%d, [%lld, %lld])", mach_exception, codes[0], codes[1]);

	exception_triage_thread(mach_exception, codes, EXCEPTION_CODE_MAX, &thread->xnu_thread);

	dtape_log_debug("exception_triage_thread returned");

out:
	thread->processing_signal = false;
};

extern int ux_exception(int exception, mach_exception_code_t code, mach_exception_subcode_t subcode);

kern_return_t handle_ux_exception(thread_t xthread, int exception, mach_exception_code_t code, mach_exception_subcode_t subcode) {
	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);

	// translate exception and code to signal type
	int ux_signal = ux_exception(exception, code, subcode);

	if (thread->processing_signal) {
		dtape_hooks->thread_set_pending_signal(thread->context, ux_signal);
	} else {
		dtape_stub_unsafe("handle_ux_exception(): TODO: introduce signal into thread");
	}

	return KERN_SUCCESS;
};

void dtape_thread_wait_while_user_suspended(dtape_thread_t* thread) {
	if (&thread->xnu_thread != current_thread()) {
		panic("Cannot wait with non-current thread");
	}

	// TODO: we need to somehow detect when the thread has a signal pending.
	//
	//       we can check `/proc/<pid>/task/<tid>/status` and look at `SigPnd`,
	//       but this would require us to check periodically (i.e. polling).
	//       not terrible, but not ideal. on the upside, we'd only need to poll the threads
	//       currently blocked here (not ALL threads), so it's not so bad.
	//
	//       we could also use SA_NODEFER and immediately have the process notify us.
	//       this requires a lot more work to implement properly, however.
	//       but, it does mean that we avoid polling.
	//
	//       another possible approach is to take advantage of a strange epoll and signalfd interaction described here: https://stackoverflow.com/a/29751604/6620880
	//       essentially, we send a thread our epoll descriptor along with some data (for us to identify the new context) and have it register a signalfd for itself.
	//       when the thread receives a signal, our epoll context will be notified.
	//       unfortunately, this has downsides either way it's done:
	//         1. we can register the signalfd at the start of each thread, which saves us the delay of doing it on every signal,
	//            but this means each thread will use yet another descriptor (in addition to their individual RPC sockets).
	//         2. we can register the signalfd only when we receive a signal, since we only need to check for pending signals during sigprocess,
	//            but this means signal processing incurs an additional delay.
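	//
	//       a rough sketch of what the per-thread registration for option 1 could look like
	//       on the managed thread's side, kept under `#if 0` since none of it is wired up yet;
	//       `suspension_epoll_fd` is a hypothetical shared epoll descriptor received from us,
	//       the rest are plain Linux syscalls:
#if 0
	sigset_t set;
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	// the signal must be blocked so it is consumed through the signalfd
	// rather than delivered to a handler
	pthread_sigmask(SIG_BLOCK, &set, NULL);
	int sigfd = signalfd(-1, &set, SFD_NONBLOCK | SFD_CLOEXEC);
	struct epoll_event ev = { .events = EPOLLIN, .data.ptr = thread };
	epoll_ctl(suspension_epoll_fd, EPOLL_CTL_ADD, sigfd, &ev);
#endif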

	while (thread->xnu_thread.suspend_count > 0) {
		dtape_log_debug("sigexc: going to sleep");

		dtape_mutex_lock(&thread->suspension_mutex);
		thread->waiting_suspended = true;
		dtape_mutex_unlock(&thread->suspension_mutex);
		dtape_condvar_signal(&thread->suspension_condvar, SIZE_MAX);

		// FIXME: possible race condition here between notifying of waiting and actually sleeping

		thread->xnu_thread.wait_result = THREAD_WAITING;

		dtape_hooks->thread_suspend(thread->context, NULL, NULL, NULL);

		dtape_log_debug("sigexc: woken up");

		dtape_mutex_lock(&thread->suspension_mutex);
		thread->waiting_suspended = false;
		dtape_mutex_unlock(&thread->suspension_mutex);
		dtape_condvar_signal(&thread->suspension_condvar, SIZE_MAX);

		if (thread->xnu_thread.wait_result == THREAD_INTERRUPTED) {
			break;
		}
	}
};

void dtape_thread_retain(dtape_thread_t* thread) {
	thread_reference(&thread->xnu_thread);
};

void dtape_thread_release(dtape_thread_t* thread) {
	thread_deallocate(&thread->xnu_thread);
};

void dtape_thread_sigexc_enter(dtape_thread_t* thread) {
	thread_lock(&thread->xnu_thread);
	thread->xnu_thread.state &= ~(TH_UNINT | TH_WAIT);
	thread->xnu_thread.wait_result = THREAD_INTERRUPTED;
	clear_wait_internal(&thread->xnu_thread, THREAD_INTERRUPTED);
	thread_unlock(&thread->xnu_thread);
};

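// push a fresh user state onto the thread's state stack; while sigexc handling
// is active, thread_set_state()/thread_get_state() operate on this copy (via
// LIST_FIRST) until dtape_thread_sigexc_exit() pops and frees it.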
void dtape_thread_sigexc_enter2(dtape_thread_t* thread) {
	dtape_thread_user_state_t* new_user_state = malloc(sizeof(dtape_thread_user_state_t));
	if (!new_user_state) {
		panic("ran out of memory");
	}

	memset(new_user_state, 0, sizeof(*new_user_state));

	thread_lock(&thread->xnu_thread);
	LIST_INSERT_HEAD(&thread->user_states, new_user_state, link);
	thread_unlock(&thread->xnu_thread);
};

void dtape_thread_sigexc_exit(dtape_thread_t* thread) {
	dtape_thread_user_state_t* user_state = NULL;

	thread_lock(&thread->xnu_thread);
	user_state = LIST_FIRST(&thread->user_states);
	LIST_REMOVE(user_state, link);
	thread_unlock(&thread->xnu_thread);

	free(user_state);
};

void dtape_thread_dying(dtape_thread_t* thread) {
	thread_lock(&thread->xnu_thread);
	thread->xnu_thread.state &= ~(TH_UNINT | TH_WAIT);
	thread->xnu_thread.state |= TH_TERMINATE;
	thread->xnu_thread.wait_result = THREAD_INTERRUPTED;
	clear_wait_internal(&thread->xnu_thread, THREAD_INTERRUPTED);
	thread_unlock(&thread->xnu_thread);
};

thread_t current_thread(void) {
	dtape_thread_t* thread = dtape_hooks->current_thread();
	return thread ? &thread->xnu_thread : NULL;
};

void (thread_reference)(thread_t thread) {
	os_ref_retain(&thread->ref_count);
};

void thread_deallocate(thread_t xthread) {
	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);
	if (os_ref_release(&xthread->ref_count) > 0) {
		return;
	}
	dtape_thread_destroy(thread);
};

void thread_deallocate_safe(thread_t thread) {
	return thread_deallocate(thread);
};

static void thread_continuation_callback(void* context) {
	dtape_thread_t* thread = context;
	thread_continue_t continuation;
	void* parameter;
	wait_result_t wait_result;

	thread_lock(&thread->xnu_thread);
	continuation = thread->xnu_thread.continuation;
	thread->xnu_thread.continuation = NULL;

	parameter = thread->xnu_thread.parameter;
	thread->xnu_thread.parameter = NULL;

	wait_result = thread->xnu_thread.wait_result;
	thread_unlock(&thread->xnu_thread);

	continuation(parameter, wait_result);

	thread_terminate_self();
};

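// duct-tape analogue of XNU's thread_block(): record the continuation and, if the
// thread was marked waiting (TH_WAIT), suspend the backing microthread via the hooks.
// as in XNU, passing a continuation means this function never returns to the caller.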
wait_result_t thread_block_parameter(thread_continue_t continuation, void* parameter) {
	dtape_thread_t* thread = dtape_hooks->current_thread();

	thread_lock(&thread->xnu_thread);

	thread->xnu_thread.continuation = continuation;
	thread->xnu_thread.parameter = parameter;

	bool waiting = thread->xnu_thread.state & TH_WAIT;

	thread_unlock(&thread->xnu_thread);

	if (waiting) {
		dtape_hooks->thread_suspend(thread->context, continuation ? thread_continuation_callback : NULL, thread, NULL);
	}

	thread_lock(&thread->xnu_thread);
	wait_result_t wait_result = thread->xnu_thread.wait_result;
	thread_unlock(&thread->xnu_thread);

	if (continuation) {
		// TODO: we should add a thread hook to jump to a continuation without suspending
		continuation(parameter, wait_result);
		__builtin_unreachable();
	}

	return wait_result;
};

wait_result_t thread_block(thread_continue_t continuation) {
	return thread_block_parameter(continuation, NULL);
};

// thread locked
boolean_t thread_unblock(thread_t xthread, wait_result_t wresult) {
	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);
	thread->xnu_thread.wait_result = wresult;
	dtape_hooks->thread_resume(thread->context);
	return TRUE;
};

// thread locked
kern_return_t thread_go(thread_t thread, wait_result_t wresult, waitq_options_t option) {
	return thread_unblock(thread, wresult) ? KERN_SUCCESS : KERN_FAILURE;
};

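// mark the current thread as waiting without actually blocking; the real block
// happens when thread_block()/thread_block_parameter() sees TH_WAIT and suspends.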
wait_result_t thread_mark_wait_locked(thread_t thread, wait_interrupt_t interruptible_orig) {
	dtape_stub_safe();
	thread->state = TH_WAIT;
	thread->wait_result = THREAD_WAITING;
	thread->block_hint = thread->pending_block_hint;
	thread->pending_block_hint = kThreadWaitNone;
	return THREAD_WAITING;
};

kern_return_t thread_terminate(thread_t xthread) {
	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);
	dtape_hooks->thread_terminate(thread->context);
	return KERN_SUCCESS;
};

void thread_terminate_self(void) {
	thread_terminate(current_thread());
};

void thread_sched_call(thread_t thread, sched_call_t call) {
	thread->sched_call = call;
};

kern_return_t kernel_thread_create(thread_continue_t continuation, void* parameter, integer_t priority, thread_t* new_thread) {
	dtape_thread_t* thread = dtape_hooks->thread_create_kernel();
	if (!thread) {
		return KERN_FAILURE;
	}

	thread_reference(&thread->xnu_thread);
	*new_thread = &thread->xnu_thread;

	thread->xnu_thread.continuation = continuation;
	thread->xnu_thread.parameter = parameter;
	thread->xnu_thread.state = TH_WAIT | TH_UNINT;

	dtape_hooks->thread_setup(thread->context, thread_continuation_callback, thread);

	return KERN_SUCCESS;
};

void thread_set_thread_name(thread_t xthread, const char* name) {
	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);
	thread->name = name;
};

__attribute__((noreturn))
void thread_syscall_return(kern_return_t ret) {
	dtape_hooks->current_thread_syscall_return(ret);
	__builtin_unreachable();
};

kern_return_t
thread_set_state(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
	dtape_thread_t* dthread = dtape_thread_for_xnu_thread(thread);
	dtape_task_t* dtask = dtape_task_for_thread(dthread);
	dtape_thread_user_state_t* user_state = LIST_FIRST(&dthread->user_states);

	if (dtask->architecture == dserver_rpc_architecture_x86_64 || dtask->architecture == dserver_rpc_architecture_i386) {
		switch (flavor)
		{
			case x86_THREAD_STATE:
			{
				x86_thread_state_t* s = (x86_thread_state_t*) state;

				if (state_count < x86_THREAD_STATE_COUNT)
					return KERN_INVALID_ARGUMENT;

				if (s->tsh.flavor == x86_THREAD_STATE32)
				{
					if (dtask->architecture == dserver_rpc_architecture_x86_64)
						return KERN_INVALID_ARGUMENT;

					state_count = s->tsh.count;
					state = (thread_state_t) &s->uts.ts32;
				}
				else if (s->tsh.flavor == x86_THREAD_STATE64)
				{
					if (dtask->architecture != dserver_rpc_architecture_x86_64)
						return KERN_INVALID_ARGUMENT;

					state_count = s->tsh.count;
					state = (thread_state_t) &s->uts.ts64;
				}
				else
					return KERN_INVALID_ARGUMENT;

				flavor = s->tsh.flavor;
				break;
			}
			case x86_FLOAT_STATE:
			{
				x86_float_state_t* s = (x86_float_state_t*) state;

				if (state_count < x86_FLOAT_STATE_COUNT)
					return KERN_INVALID_ARGUMENT;

				if (s->fsh.flavor == x86_FLOAT_STATE32)
				{
					if (dtask->architecture == dserver_rpc_architecture_x86_64)
						return KERN_INVALID_ARGUMENT;

					state_count = s->fsh.count;
					state = (thread_state_t) &s->ufs.fs32;
				}
				else if (s->fsh.flavor == x86_FLOAT_STATE64)
				{
					if (dtask->architecture != dserver_rpc_architecture_x86_64)
						return KERN_INVALID_ARGUMENT;

					state_count = s->fsh.count;
					state = (thread_state_t) &s->ufs.fs64;
				}
				else
					return KERN_INVALID_ARGUMENT;

				flavor = s->fsh.flavor;
				break;
			}
			case x86_DEBUG_STATE:
			{
				x86_debug_state_t* s = (x86_debug_state_t*) state;

				if (state_count < x86_DEBUG_STATE_COUNT)
					return KERN_INVALID_ARGUMENT;

				if (s->dsh.flavor == x86_DEBUG_STATE32)
				{
					if (dtask->architecture == dserver_rpc_architecture_x86_64)
						return KERN_INVALID_ARGUMENT;

					state_count = s->dsh.count;
					state = (thread_state_t) &s->uds.ds32;
				}
				else if (s->dsh.flavor == x86_DEBUG_STATE64)
				{
					if (dtask->architecture != dserver_rpc_architecture_x86_64)
						return KERN_INVALID_ARGUMENT;

					state_count = s->dsh.count;
					state = (thread_state_t) &s->uds.ds64;
				}
				else
					return KERN_INVALID_ARGUMENT;

				flavor = s->dsh.flavor;
				break;
			}
		}

		switch (flavor)
		{
			case x86_THREAD_STATE32:
			{
				if (state_count < x86_THREAD_STATE32_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture == dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				const x86_thread_state32_t* s = (x86_thread_state32_t*) state;

				memcpy(&user_state->thread_state.uts.ts32, s, sizeof(*s));
				return KERN_SUCCESS;
			}
			case x86_THREAD_STATE64:
			{
				if (state_count < x86_THREAD_STATE64_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture != dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				const x86_thread_state64_t* s = (x86_thread_state64_t*) state;

				// printf("Saving RIP 0x%lx, FLG 0x%lx\n", s->rip, s->rflags);

				memcpy(&user_state->thread_state.uts.ts64, s, sizeof(*s));
				return KERN_SUCCESS;
			}
			case x86_FLOAT_STATE32:
			{
				if (state_count < x86_FLOAT_STATE32_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture == dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				const x86_float_state32_t* s = (x86_float_state32_t*) state;

				memcpy(&user_state->float_state.ufs.fs32, s, sizeof(*s));
				return KERN_SUCCESS;
			}

			case x86_FLOAT_STATE64:
			{
				if (state_count < x86_FLOAT_STATE64_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture != dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				const x86_float_state64_t* s = (x86_float_state64_t*) state;

				memcpy(&user_state->float_state.ufs.fs64, s, sizeof(*s));
				return KERN_SUCCESS;
			}
			case x86_DEBUG_STATE32:
			{
				if (dtask->architecture == dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;
				const x86_debug_state32_t* s = (x86_debug_state32_t*) state;
				x86_debug_state64_t s64;

				s64.dr0 = s->dr0;
				s64.dr1 = s->dr1;
				s64.dr2 = s->dr2;
				s64.dr3 = s->dr3;
				s64.dr4 = s->dr4;
				s64.dr5 = s->dr5;
				s64.dr6 = s->dr6;
				s64.dr7 = s->dr7;

				return thread_set_state(thread, x86_DEBUG_STATE64, (thread_state_t) &s64,
						x86_DEBUG_STATE64_COUNT);
			}
			case x86_DEBUG_STATE64:
			{
#if 0
				if (dtask->architecture != dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				const x86_debug_state64_t* s = (x86_debug_state64_t*) state;

				struct thread_struct *lthread = &ltask->thread;
				int i;

				for (i = 0; i < 4; i++)
				{
					__uint64_t addr = (&s->dr0)[i];

					if (lthread->ptrace_bps[i] != NULL)
					{
						struct perf_event* pevent = lthread->ptrace_bps[i];
						struct perf_event_attr attr = pevent->attr;

						if (s->dr7 & (1 << (2*i)))
						{
							// Possibly modify an existing watchpoint
							fill_breakpoint(&attr, s->dr7, i);
							attr.bp_addr = addr;

							if (memcmp(&attr, &pevent->attr, sizeof(attr)) == 0)
								continue; // no change
						}
						else
						{
							// Disable the watchpoint
							if (attr.disabled)
								continue; // already disabled

							attr.disabled = true;
						}

						modify_user_hw_breakpoint(pevent, &attr);
					}
					else if (s->dr7 & (1 << (2*i)))
					{
						// Create a new watchpoint
						struct perf_event_attr attr;
						struct perf_event* pevent;

						fill_breakpoint(&attr, s->dr7, i);
						attr.bp_addr = addr;

						pevent = register_user_hw_breakpoint(&attr, watchpoint_callback, NULL, ltask);
						lthread->ptrace_bps[i] = pevent;
					}
				}

				return KERN_SUCCESS;
#else
				// TODO
				dtape_stub("debug state");
				return KERN_NOT_SUPPORTED;
#endif
			}
			default:
				return KERN_INVALID_ARGUMENT;
		}
	}
	return KERN_FAILURE;
}

kern_return_t
thread_get_state_internal(
    register thread_t       thread,
    int                     flavor,
    thread_state_t          state,          /* pointer to OUT array */
    mach_msg_type_number_t  *state_count,   /*IN/OUT*/
    boolean_t               to_user)
{
	dtape_thread_t* dthread = dtape_thread_for_xnu_thread(thread);
	dtape_task_t* dtask = dtape_task_for_thread(dthread);
	dtape_thread_user_state_t* user_state = LIST_FIRST(&dthread->user_states);

	// to_user is used to indicate whether to perform any necessary conversions from kernel to user thread state representations
	// it currently only does something on ARM64 when the authenticated pointers (`ptrauth_calls`) feature is enabled,
	// so i think it's safe to say we can ignore it in Darling (even when we get ARM support)

	if (dtask->architecture == dserver_rpc_architecture_x86_64 || dtask->architecture == dserver_rpc_architecture_i386) {
		switch (flavor)
		{
			// The following flavors automatically select 32 or 64-bit state
			// based on process type.
			case x86_THREAD_STATE:
			{
				x86_thread_state_t* s = (x86_thread_state_t*) state;

				if (*state_count < x86_THREAD_STATE_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture == dserver_rpc_architecture_x86_64)
				{
					s->tsh.flavor = flavor = x86_THREAD_STATE64;
					s->tsh.count = x86_THREAD_STATE64_COUNT;
					state = (thread_state_t) &s->uts.ts64;
				}
				else
				{
					s->tsh.flavor = flavor = x86_THREAD_STATE32;
					s->tsh.count = x86_THREAD_STATE32_COUNT;
					state = (thread_state_t) &s->uts.ts32;
				}
				*state_count = x86_THREAD_STATE_COUNT;
				state_count = &s->tsh.count;

				break;
			}
			case x86_FLOAT_STATE:
			{
				x86_float_state_t* s = (x86_float_state_t*) state;

				if (*state_count < x86_FLOAT_STATE_COUNT)
					return KERN_INVALID_ARGUMENT;

				if (dtask->architecture == dserver_rpc_architecture_x86_64)
				{
					s->fsh.flavor = flavor = x86_FLOAT_STATE64;
					s->fsh.count = x86_FLOAT_STATE64_COUNT;
					state = (thread_state_t) &s->ufs.fs64;
				}
				else
				{
					s->fsh.flavor = flavor = x86_FLOAT_STATE32;
					s->fsh.count = x86_FLOAT_STATE32_COUNT;
					state = (thread_state_t) &s->ufs.fs32;
				}
				*state_count = x86_FLOAT_STATE_COUNT;
				state_count = &s->fsh.count;
				break;
			}
			case x86_DEBUG_STATE:
			{
				x86_debug_state_t* s = (x86_debug_state_t*) state;

				if (*state_count < x86_DEBUG_STATE_COUNT)
					return KERN_INVALID_ARGUMENT;

				if (dtask->architecture == dserver_rpc_architecture_x86_64)
				{
					s->dsh.flavor = flavor = x86_DEBUG_STATE64;
					s->dsh.count = x86_DEBUG_STATE64_COUNT;
					state = (thread_state_t) &s->uds.ds64;
				}
				else
				{
					s->dsh.flavor = flavor = x86_DEBUG_STATE32;
					s->dsh.count = x86_DEBUG_STATE32_COUNT;
					state = (thread_state_t) &s->uds.ds32;
				}
				*state_count = x86_DEBUG_STATE_COUNT;
				state_count = &s->dsh.count;
				break;
			}
		}

		switch (flavor)
		{
			case x86_THREAD_STATE32:
			{
				if (*state_count < x86_THREAD_STATE32_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture == dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				x86_thread_state32_t* s = (x86_thread_state32_t*) state;

				*state_count = x86_THREAD_STATE32_COUNT;

				memcpy(s, &user_state->thread_state.uts.ts32, sizeof(*s));

				return KERN_SUCCESS;
			}
			case x86_FLOAT_STATE32:
			{
				if (*state_count < x86_FLOAT_STATE32_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture == dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				x86_float_state32_t* s = (x86_float_state32_t*) state;

				*state_count = x86_FLOAT_STATE32_COUNT;
				memcpy(s, &user_state->float_state.ufs.fs32, sizeof(*s));

				return KERN_SUCCESS;
			}
			case x86_FLOAT_STATE64: // these two are practically identical
			{
				if (*state_count < x86_FLOAT_STATE64_COUNT)
					return KERN_INVALID_ARGUMENT;

				x86_float_state64_t* s = (x86_float_state64_t*) state;

				*state_count = x86_FLOAT_STATE64_COUNT;
				memcpy(s, &user_state->float_state.ufs.fs64, sizeof(*s));

				return KERN_SUCCESS;
			}
			case x86_THREAD_STATE64:
			{
				if (*state_count < x86_THREAD_STATE64_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture != dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				x86_thread_state64_t* s = (x86_thread_state64_t*) state;
				*state_count = x86_THREAD_STATE64_COUNT;

				memcpy(s, &user_state->thread_state.uts.ts64, sizeof(*s));

				// printf("Returning RIP 0x%x\n", s->rip);

				return KERN_SUCCESS;
			}
			case x86_DEBUG_STATE32:
			{
				if (*state_count < x86_DEBUG_STATE32_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture == dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				x86_debug_state32_t* s = (x86_debug_state32_t*) state;
				*state_count = x86_DEBUG_STATE32_COUNT;

				// Call self and translate from 64-bit
				x86_debug_state64_t s64;
				mach_msg_type_number_t count = x86_DEBUG_STATE64_COUNT;

				kern_return_t kr = thread_get_state_internal(thread, x86_DEBUG_STATE64,
						(thread_state_t) &s64, &count, FALSE);

				if (kr != KERN_SUCCESS)
					return kr;

				s->dr0 = s64.dr0;
				s->dr1 = s64.dr1;
				s->dr2 = s64.dr2;
				s->dr3 = s64.dr3;
				s->dr4 = s64.dr4;
				s->dr5 = s64.dr5;
				s->dr6 = s64.dr6;
				s->dr7 = s64.dr7;

				return KERN_SUCCESS;
			}
			case x86_DEBUG_STATE64:
			{
#if 0
				if (*state_count < x86_DEBUG_STATE64_COUNT)
					return KERN_INVALID_ARGUMENT;
				if (dtask->architecture != dserver_rpc_architecture_x86_64)
					return KERN_INVALID_ARGUMENT;

				x86_debug_state64_t* s = (x86_debug_state64_t*) state;
				*state_count = x86_DEBUG_STATE64_COUNT;

				memset(s, 0, sizeof(*s));

				struct thread_struct *lthread = &ltask->thread;
				int i;

				for (i = 0; i < 4; i++)
				{
					if (lthread->ptrace_bps[i] != NULL)
					{
						const struct perf_event_attr* attr = &lthread->ptrace_bps[i]->attr;

						if (!attr->disabled && attr->bp_type != HW_BREAKPOINT_EMPTY)
							s->dr7 |= 1 << (2*i); // set local enable flag

						switch (attr->bp_type)
						{
							case HW_BREAKPOINT_W:
								s->dr7 |= 1 << (16 + i*4);
								break;
							case HW_BREAKPOINT_RW:
							case HW_BREAKPOINT_R:
								s->dr7 |= 3 << (16 + i*4);
								break;
							case HW_BREAKPOINT_X:
								break;
						}

						switch (attr->bp_len)
						{
							case HW_BREAKPOINT_LEN_1:
								break;
							case HW_BREAKPOINT_LEN_2:
								s->dr7 |= 1 << (18 + i*4);
								break;
							case HW_BREAKPOINT_LEN_4:
								s->dr7 |= 3 << (18 + i*4);
								break;
							case HW_BREAKPOINT_LEN_8:
								s->dr7 |= 2 << (18 + i*4);
								break;
						}

						(&s->dr0)[i] = attr->bp_addr;
					}
				}

				return KERN_SUCCESS;
#else
				// TODO
				return KERN_NOT_SUPPORTED;
#endif
			}
			default:
				return KERN_INVALID_ARGUMENT;
		}
	} else {
		return KERN_FAILURE;
	}
}

kern_return_t
thread_get_state(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state, /* pointer to OUT array */
	mach_msg_type_number_t  *state_count) /*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
}

thread_qos_t thread_get_requested_qos(thread_t thread, int* relpri) {
	dtape_stub_safe();
	*relpri = 0;
	return THREAD_QOS_DEFAULT;
};

thread_qos_t thread_user_promotion_qos_for_pri(int priority) {
	dtape_stub_safe();
	return THREAD_QOS_DEFAULT;
};

void thread_guard_violation(thread_t thread, mach_exception_data_type_t code, mach_exception_data_type_t subcode, boolean_t fatal) {
	dtape_stub();
};

void thread_port_with_flavor_notify(mach_msg_header_t* msg) {
	dtape_stub();
};

boolean_t thread_recompute_kernel_promotion_locked(thread_t thread) {
	dtape_stub_safe();
	return FALSE;
};

boolean_t thread_recompute_user_promotion_locked(thread_t thread) {
	dtape_stub_safe();
	return FALSE;
};

void thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint) {
	thread->pending_block_hint = block_hint;
};

kern_return_t thread_set_state_from_user(thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t state_count) {
	return thread_set_state(thread, flavor, state, state_count);
};

void thread_set_eager_preempt(thread_t thread) {
	dtape_stub_safe();
};

void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj) {
	dtape_stub_safe();
};

void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj) {
	dtape_stub_safe();
};

void thread_poll_yield(thread_t self) {
	dtape_stub_safe();
};

kern_return_t act_get_state_to_user(thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t* count) {
	dtape_stub_unsafe();
};

kern_return_t act_set_state_from_user(thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t count) {
	dtape_stub_unsafe();
};

kern_return_t thread_abort(thread_t thread) {
	dtape_stub_unsafe();
};

kern_return_t thread_abort_safely(thread_t thread) {
	// TODO: actually do something?
	//       in the LKM, we used to call `kick_process` here
	//       (which would presumably interrupt any syscalls).
	//       to replicate that, we'd probably have to use another
	//       real-time signal with SA_RESTART off.
	dtape_stub();
	return KERN_SUCCESS;
};

kern_return_t thread_convert_thread_state(thread_t thread, int direction, thread_state_flavor_t flavor, thread_state_t in_state, mach_msg_type_number_t in_state_count, thread_state_t out_state, mach_msg_type_number_t* out_state_count) {
	dtape_stub_unsafe();
};

kern_return_t thread_create_from_user(task_t task, thread_t* new_thread) {
	dtape_stub_unsafe();
};

kern_return_t thread_create_running_from_user(task_t task, int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, thread_t* new_thread) {
	dtape_stub_unsafe();
};

kern_return_t thread_depress_abort_from_user(thread_t thread) {
	dtape_stub_safe();
	return KERN_SUCCESS;
};

kern_return_t thread_info(thread_t xthread, thread_flavor_t flavor, thread_info_t thread_info_out, mach_msg_type_number_t* thread_info_count) {
	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);

	switch (flavor) {
		case THREAD_IDENTIFIER_INFO: {
			if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) {
				return KERN_INVALID_ARGUMENT;
			}
			*thread_info_count = THREAD_IDENTIFIER_INFO_COUNT;

			thread_identifier_info_t info = (thread_identifier_info_t)thread_info_out;

			thread_lock(xthread);

			info->thread_id = xthread->thread_id;
			info->thread_handle = thread->pthread_handle;
			info->dispatch_qaddr = thread->dispatch_qaddr;

			thread_unlock(xthread);

			return KERN_SUCCESS;
		};

		case THREAD_BASIC_INFO: {
			if (*thread_info_count < THREAD_BASIC_INFO_COUNT) {
				return KERN_INVALID_ARGUMENT;
			}
			*thread_info_count = THREAD_BASIC_INFO_COUNT;

			thread_basic_info_t info = (thread_basic_info_t) thread_info_out;
			dtape_thread_state_t thread_state = -1;

			thread_lock(xthread);

			// TODO: fill in these values properly
			info->cpu_usage = 0;
			info->flags = 0;
			info->policy = 0;
			info->sleep_time = 0;
			info->system_time.seconds = 0;
			info->system_time.microseconds = 0;
			info->user_time.seconds = 0;
			info->user_time.microseconds = 0;

			info->suspend_count = xthread->user_stop_count;

			thread_unlock(xthread);

			// check if the thread is currently waiting suspended; in that case, the `thread_get_state` hook will
			// report that it is waiting interruptibly (because that's what Linux sees), but we know that it's
			// actually "stopped" waiting for us to resume it.
			dtape_mutex_lock(&thread->suspension_mutex);
			if (thread->waiting_suspended) {
				thread_state = dtape_thread_state_stopped;
			}
			dtape_mutex_unlock(&thread->suspension_mutex);

			if (thread_state == -1) {
				thread_state = dtape_hooks->thread_get_state(thread->context);
			}

			switch (thread_state) {
				case dtape_thread_state_dead:
					info->run_state = 0;
					break;
				case dtape_thread_state_running:
					info->run_state = TH_STATE_RUNNING;
					break;
				case dtape_thread_state_stopped:
					info->run_state = TH_STATE_STOPPED;
					break;
				case dtape_thread_state_interruptible:
					info->run_state = TH_STATE_WAITING;
					break;
				case dtape_thread_state_uninterruptible:
					info->run_state = TH_STATE_UNINTERRUPTIBLE;
					break;
				default:
					panic("invalid thread state: %d; this should be impossible", thread_state);
			}

			return KERN_SUCCESS;
		};

		default:
			dtape_stub_unsafe("Unimplemented flavor");
	}
};

kern_return_t thread_policy(thread_t thread, policy_t policy, policy_base_t base, mach_msg_type_number_t count, boolean_t set_limit) {
	dtape_stub_safe();
	return KERN_SUCCESS;
};

kern_return_t thread_policy_get(thread_t thread, thread_policy_flavor_t flavor, thread_policy_t policy_info, mach_msg_type_number_t* count, boolean_t* get_default) {
	dtape_stub_unsafe();
};

kern_return_t thread_policy_set(thread_t thread, thread_policy_flavor_t flavor, thread_policy_t policy_info, mach_msg_type_number_t count) {
	dtape_stub_safe();
	return KERN_SUCCESS;
};

kern_return_t thread_set_mach_voucher(thread_t thread, ipc_voucher_t voucher) {
	dtape_stub_unsafe();
};

kern_return_t thread_set_policy(thread_t thread, processor_set_t pset, policy_t policy, policy_base_t base, mach_msg_type_number_t base_count, policy_limit_t limit, mach_msg_type_number_t limit_count) {
	dtape_stub_unsafe();
};

kern_return_t thread_wire(host_priv_t host_priv, thread_t thread, boolean_t wired) {
	dtape_stub_unsafe();
};

kern_return_t thread_getstatus_to_user(thread_t thread, int flavor, thread_state_t tstate, mach_msg_type_number_t* count) {
	dtape_stub_unsafe();
};

kern_return_t thread_setstatus_from_user(thread_t thread, int flavor, thread_state_t tstate, mach_msg_type_number_t count) {
	dtape_stub_unsafe();
};

boolean_t thread_should_abort(thread_t thread) {
	dtape_stub();
	return FALSE;
};

static wait_result_t thread_handoff_internal(thread_t thread, thread_continue_t continuation, void* parameter, thread_handoff_option_t option) {
	if (thread != THREAD_NULL) {
		if (continuation == NULL || (option & THREAD_HANDOFF_SETRUN_NEEDED)) {
			thread_deallocate_safe(thread);
		}

		// in the real thread_handoff_internal(), an attempt is made to grab the thread to hand off to.
		// if it could not be pulled from its runq, the current thread simply blocks with thread_block_parameter().
		// therefore, it's not actually necessary to hand off to the given thread, and we skip doing so to keep our implementation simpler.
	}

	return thread_block_parameter(continuation, parameter);
};

void thread_hold(thread_t xthread) {
	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);
	dtape_log_debug("sigexc: thread_hold(%p)\n", xthread);
	// CHECKME: the LKM was always sending the signal whenever thread_hold got called;
	//          we mimic XNU here instead. check whether this actually works as expected.
	if (xthread->suspend_count++ == 0) {
		// This signal leads to sigexc.c which will end up calling darlingserver;
		// darlingserver will hold the caller so long as the suspend_count > 0.
		dtape_hooks->thread_send_signal(thread->context, LINUX_SIGRTMIN);
	}
};

void thread_release(thread_t xthread) {
	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);
	dtape_log_debug("sigexc: thread_release(%p)\n", xthread);
	xthread->suspend_count--;
	dtape_hooks->thread_resume(thread->context);
};

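// block until the target thread has actually parked itself in
// dtape_thread_wait_while_user_suspended() (i.e. waiting_suspended is set).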
1372  void thread_wait(thread_t xthread, boolean_t until_not_runnable) {
1373  	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);
1374  	dtape_mutex_lock(&thread->suspension_mutex);
1375  	while (!thread->waiting_suspended) {
1376  		dtape_condvar_wait(&thread->suspension_condvar, &thread->suspension_mutex);
1377  	}
1378  	dtape_mutex_unlock(&thread->suspension_mutex);
1379  };
1380  
1381  // ignore the lock timeout
1382  #define LockTimeOutUsec UINT32_MAX
1383  
1384  // <copied from="xnu://7195.141.2/osfmk/kern/sched_prim.c">
1385  
1386  /*
1387   *	thread_wakeup_prim:
1388   *
1389   *	Common routine for thread_wakeup, thread_wakeup_with_result,
1390   *	and thread_wakeup_one.
1391   *
1392   */
1393  kern_return_t
1394  thread_wakeup_prim(
1395  	event_t          event,
1396  	boolean_t        one_thread,
1397  	wait_result_t    result)
1398  {
1399  	if (__improbable(event == NO_EVENT)) {
1400  		panic("%s() called with NO_EVENT", __func__);
1401  	}
1402  
1403  	struct waitq *wq = global_eventq(event);
1404  
1405  	if (one_thread) {
1406  		return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1407  	} else {
1408  		return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1409  	}
1410  }
1411  
1412  /*
1413   * Wakeup a specified thread if and only if it's waiting for this event
1414   */
1415  kern_return_t
1416  thread_wakeup_thread(
1417  	event_t         event,
1418  	thread_t        thread)
1419  {
1420  	if (__improbable(event == NO_EVENT)) {
1421  		panic("%s() called with NO_EVENT", __func__);
1422  	}
1423  
1424  	if (__improbable(thread == THREAD_NULL)) {
1425  		panic("%s() called with THREAD_NULL", __func__);
1426  	}
1427  
1428  	struct waitq *wq = global_eventq(event);
1429  
1430  	return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1431  }
1432  
1433  /*
1434   *	assert_wait:
1435   *
1436   *	Assert that the current thread is about to go to
1437   *	sleep until the specified event occurs.
1438   */
1439  wait_result_t
1440  assert_wait(
1441  	event_t                         event,
1442  	wait_interrupt_t        interruptible)
1443  {
1444  	if (__improbable(event == NO_EVENT)) {
1445  		panic("%s() called with NO_EVENT", __func__);
1446  	}
1447  
1448  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1449  	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1450  	    VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
1451  
1452  	struct waitq *waitq;
1453  	waitq = global_eventq(event);
1454  	return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
1455  }
1456  
1457  wait_result_t
1458  assert_wait_timeout(
1459  	event_t                         event,
1460  	wait_interrupt_t        interruptible,
1461  	uint32_t                        interval,
1462  	uint32_t                        scale_factor)
1463  {
1464  	thread_t                        thread = current_thread();
1465  	wait_result_t           wresult;
1466  	uint64_t                        deadline;
1467  	spl_t                           s;
1468  
1469  	if (__improbable(event == NO_EVENT)) {
1470  		panic("%s() called with NO_EVENT", __func__);
1471  	}
1472  
1473  	struct waitq *waitq;
1474  	waitq = global_eventq(event);
1475  
1476  	s = splsched();
1477  	waitq_lock(waitq);
1478  
1479  	clock_interval_to_deadline(interval, scale_factor, &deadline);
1480  
1481  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1482  	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1483  	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1484  
1485  	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1486  	    interruptible,
1487  	    TIMEOUT_URGENCY_SYS_NORMAL,
1488  	    deadline, TIMEOUT_NO_LEEWAY,
1489  	    thread);
1490  
1491  	waitq_unlock(waitq);
1492  	splx(s);
1493  	return wresult;
1494  }
1495  
1496  wait_result_t
1497  assert_wait_deadline(
1498  	event_t                         event,
1499  	wait_interrupt_t        interruptible,
1500  	uint64_t                        deadline)
1501  {
1502  	thread_t                        thread = current_thread();
1503  	wait_result_t           wresult;
1504  	spl_t                           s;
1505  
1506  	if (__improbable(event == NO_EVENT)) {
1507  		panic("%s() called with NO_EVENT", __func__);
1508  	}
1509  
1510  	struct waitq *waitq;
1511  	waitq = global_eventq(event);
1512  
1513  	s = splsched();
1514  	waitq_lock(waitq);
1515  
1516  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1517  	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1518  	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1519  
1520  	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1521  	    interruptible,
1522  	    TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1523  	    TIMEOUT_NO_LEEWAY, thread);
1524  	waitq_unlock(waitq);
1525  	splx(s);
1526  	return wresult;
1527  }
1528  
1529  wait_result_t
1530  assert_wait_deadline_with_leeway(
1531  	event_t                         event,
1532  	wait_interrupt_t        interruptible,
1533  	wait_timeout_urgency_t  urgency,
1534  	uint64_t                        deadline,
1535  	uint64_t                        leeway)
1536  {
1537  	thread_t                        thread = current_thread();
1538  	wait_result_t           wresult;
1539  	spl_t                           s;
1540  
1541  	if (__improbable(event == NO_EVENT)) {
1542  		panic("%s() called with NO_EVENT", __func__);
1543  	}
1544  
1545  	struct waitq *waitq;
1546  	waitq = global_eventq(event);
1547  
1548  	s = splsched();
1549  	waitq_lock(waitq);
1550  
1551  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1552  	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1553  	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1554  
1555  	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1556  	    interruptible,
1557  	    urgency, deadline, leeway,
1558  	    thread);
1559  	waitq_unlock(waitq);
1560  	splx(s);
1561  	return wresult;
1562  }
1563  
1564  /*
1565   *	Routine: clear_wait_internal
1566   *
1567   *		Clear the wait condition for the specified thread.
1568   *		Start the thread executing if that is appropriate.
1569   *	Arguments:
1570   *		thread		thread to awaken
1571   *		result		Wakeup result the thread should see
1572   *	Conditions:
1573   *		At splsched
1574   *		the thread is locked.
1575   *	Returns:
1576   *		KERN_SUCCESS		thread was rousted out a wait
1577   *		KERN_FAILURE		thread was waiting but could not be rousted
1578   *		KERN_NOT_WAITING	thread was not waiting
1579   */
1580  __private_extern__ kern_return_t
1581  clear_wait_internal(
1582  	thread_t                thread,
1583  	wait_result_t   wresult)
1584  {
1585  	uint32_t        i = LockTimeOutUsec;
1586  	struct waitq *waitq = thread->waitq;
1587  
1588  	do {
1589  		if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
1590  			return KERN_FAILURE;
1591  		}
1592  
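      		// Annotation: the thread is (or was) parked on a waitq; try to
      		// atomically remove it from that queue. If the pull fails, drop
      		// the thread lock, back off briefly, and retry for up to
      		// LockTimeOutUsec iterations before declaring a deadlock below.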
1593  		if (waitq != NULL) {
1594  			if (!waitq_pull_thread_locked(waitq, thread)) {
1595  				thread_unlock(thread);
1596  				delay(1);
1597  				if (i > 0 && !machine_timeout_suspended()) {
1598  					i--;
1599  				}
1600  				thread_lock(thread);
1601  				if (waitq != thread->waitq) {
1602  					return KERN_NOT_WAITING;
1603  				}
1604  				continue;
1605  			}
1606  		}
1607  
1608  		/* TODO: Can we instead assert TH_TERMINATE is not set?  */
1609  		if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
1610  			return thread_go(thread, wresult, WQ_OPTION_NONE);
1611  		} else {
1612  			return KERN_NOT_WAITING;
1613  		}
1614  	} while (i > 0);
1615  
1616  	panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1617  	    thread, waitq, cpu_number());
1618  
1619  	return KERN_FAILURE;
1620  }
1621  
1622  
1623  /*
1624   *	clear_wait:
1625   *
1626   *	Clear the wait condition for the specified thread.  Start the thread
1627   *	executing if that is appropriate.
1628   *
1629   *	parameters:
1630   *	  thread		thread to awaken
1631   *	  result		Wakeup result the thread should see
1632   */
1633  kern_return_t
1634  clear_wait(
1635  	thread_t                thread,
1636  	wait_result_t   result)
1637  {
1638  	kern_return_t ret;
1639  	spl_t           s;
1640  
1641  	s = splsched();
1642  	thread_lock(thread);
1643  	ret = clear_wait_internal(thread, result);
1644  	thread_unlock(thread);
1645  	splx(s);
1646  	return ret;
1647  }
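
      // Annotation: clear_wait() is the unlocked wrapper around
      // clear_wait_internal(); thread_start() further below is a typical
      // caller, waking a newly created thread with
      // clear_wait(thread, THREAD_AWAKENED).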
1648  
1649  /*
1650   *	Thread wait timer expiration.
1651   */
1652  void
1653  thread_timer_expire(
1654  	void                    *p0,
1655  	__unused void   *p1)
1656  {
1657  	thread_t                thread = p0;
1658  	spl_t                   s;
1659  
1660  	assert_thread_magic(thread);
1661  
1662  	s = splsched();
1663  	thread_lock(thread);
1664  	if (--thread->wait_timer_active == 0) {
1665  		if (thread->wait_timer_is_set) {
1666  			thread->wait_timer_is_set = FALSE;
1667  			clear_wait_internal(thread, THREAD_TIMED_OUT);
1668  		}
1669  	}
1670  	thread_unlock(thread);
1671  	splx(s);
1672  }
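
      // Annotation: wait_timer_active counts in-flight expirations; the
      // timeout is delivered (as THREAD_TIMED_OUT) only when the final
      // pending call fires while wait_timer_is_set is still true, i.e. the
      // wait was not already satisfied or cancelled.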
1673  
1674  /*
1675   *	assert_wait_queue:
1676   *
1677   *	Return the global waitq for the specified event
1678   */
1679  struct waitq *
1680  assert_wait_queue(
1681  	event_t                         event)
1682  {
1683  	return global_eventq(event);
1684  }
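
      // Annotation: global_eventq() hashes the event address into a fixed
      // table of global wait queues, so unrelated events may share a waitq.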
1685  
1686  // </copied>
1687  
1688  // <copied from="xnu://7195.141.2/osfmk/kern/thread.c">
1689  
1690  kern_return_t
1691  thread_assign(
1692  	__unused thread_t                       thread,
1693  	__unused processor_set_t        new_pset)
1694  {
1695  	return KERN_FAILURE;
1696  }
1697  
1698  /*
1699   *	thread_assign_default:
1700   *
1701   *	Special version of thread_assign for assigning threads to default
1702   *	processor set.
1703   */
1704  kern_return_t
1705  thread_assign_default(
1706  	thread_t                thread)
1707  {
1708  	return thread_assign(thread, &pset0);
1709  }
1710  
1711  /*
1712   *	thread_get_assignment
1713   *
1714   *	Return current assignment for this thread.
1715   */
1716  kern_return_t
1717  thread_get_assignment(
1718  	thread_t                thread,
1719  	processor_set_t *pset)
1720  {
1721  	if (thread == NULL) {
1722  		return KERN_INVALID_ARGUMENT;
1723  	}
1724  
1725  	*pset = &pset0;
1726  
1727  	return KERN_SUCCESS;
1728  }
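
      // Annotation: processor sets are vestigial here; every thread reports
      // an assignment to the default set, pset0.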
1729  
1730  /*
1731   *  thread_get_mach_voucher - return a voucher reference for the specified thread voucher
1732   *
1733   *  Conditions:  nothing locked
1734   *
1735   *  NOTE:       At the moment, there is no distinction between the current and effective
1736   *		vouchers because we only set them at the thread level currently.
1737   */
1738  kern_return_t
1739  thread_get_mach_voucher(
1740  	thread_act_t            thread,
1741  	mach_voucher_selector_t __unused which,
1742  	ipc_voucher_t           *voucherp)
1743  {
1744  	ipc_voucher_t           voucher;
1745  
1746  	if (THREAD_NULL == thread) {
1747  		return KERN_INVALID_ARGUMENT;
1748  	}
1749  
1750  	thread_mtx_lock(thread);
1751  	voucher = thread->ith_voucher;
1752  
1753  	if (IPC_VOUCHER_NULL != voucher) {
1754  		ipc_voucher_reference(voucher);
1755  		thread_mtx_unlock(thread);
1756  		*voucherp = voucher;
1757  		return KERN_SUCCESS;
1758  	}
1759  
1760  	thread_mtx_unlock(thread);
1761  
1762  	*voucherp = IPC_VOUCHER_NULL;
1763  	return KERN_SUCCESS;
1764  }
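
      // Annotation (not from XNU): sketch of the caller-side contract. The
      // returned reference must be dropped once the caller is done with the
      // voucher; the MACH_VOUCHER_SELECTOR_CURRENT selector is assumed here.
      #if 0
      	ipc_voucher_t v;
      	if (thread_get_mach_voucher(current_thread(), MACH_VOUCHER_SELECTOR_CURRENT, &v) == KERN_SUCCESS &&
      	    v != IPC_VOUCHER_NULL) {
      		/* ... use v ... */
      		ipc_voucher_release(v);
      	}
      #endif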
1765  
1766  /*
1767   *  thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
1768   *
1769   *  Conditions: caller holds a reference on the new and presumed old voucher(s).
1770   *		nothing locked.
1771   *
1772   *  This function is no longer supported.
1773   */
1774  kern_return_t
1775  thread_swap_mach_voucher(
1776  	__unused thread_t               thread,
1777  	__unused ipc_voucher_t          new_voucher,
1778  	ipc_voucher_t                   *in_out_old_voucher)
1779  {
1780  	/*
1781  	 * Currently this function is only called from a MIG generated
1782  	 * routine which doesn't release the reference on the voucher
1783  	 * addressed by in_out_old_voucher. To avoid leaking this reference,
1784  	 * a call to release it has been added here.
1785  	 */
1786  	ipc_voucher_release(*in_out_old_voucher);
1787  	return KERN_NOT_SUPPORTED;
1788  }
1789  
1790  kern_return_t
1791  kernel_thread_start_priority(
1792  	thread_continue_t       continuation,
1793  	void                            *parameter,
1794  	integer_t                       priority,
1795  	thread_t                        *new_thread)
1796  {
1797  	kern_return_t   result;
1798  	thread_t                thread;
1799  
1800  	result = kernel_thread_create(continuation, parameter, priority, &thread);
1801  	if (result != KERN_SUCCESS) {
1802  		return result;
1803  	}
1804  
1805  	*new_thread = thread;
1806  
1807  	thread_mtx_lock(thread);
1808  	thread_start(thread);
1809  	thread_mtx_unlock(thread);
1810  
1811  	return result;
1812  }
1813  
1814  kern_return_t
1815  kernel_thread_start(
1816  	thread_continue_t       continuation,
1817  	void                            *parameter,
1818  	thread_t                        *new_thread)
1819  {
1820  	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
1821  }
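
      // Annotation (not from XNU): kernel_thread_start() returns with a
      // reference held on the new thread. A sketch of the usual pattern;
      // the worker and spawn function names are hypothetical.
      #if 0
      static void
      my_worker(void *parameter, wait_result_t wr)
      {
      	/* ... do work; a kernel thread never returns, so terminate explicitly */
      	thread_terminate(current_thread());
      }
      
      static kern_return_t
      spawn_worker(void)
      {
      	thread_t t;
      	kern_return_t kr = kernel_thread_start(my_worker, NULL, &t);
      	if (kr == KERN_SUCCESS) {
      		thread_deallocate(t);   /* drop the creation reference */
      	}
      	return kr;
      }
      #endif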
1822  
1823  uint64_t
1824  thread_tid(
1825  	thread_t        thread)
1826  {
1827  	return thread != THREAD_NULL ? thread->thread_id : 0;
1828  }
1829  
1830  /*
1831   *	thread_read_deallocate:
1832   *
1833   *	Drop a reference on thread read port.
1834   */
1835  void
1836  thread_read_deallocate(
1837  	thread_read_t                thread_read)
1838  {
1839  	return thread_deallocate((thread_t)thread_read);
1840  }
1841  
1842  /*
1843   *	thread_inspect_deallocate:
1844   *
1845   *	Drop a thread inspection reference.
1846   */
1847  void
1848  thread_inspect_deallocate(
1849  	thread_inspect_t                thread_inspect)
1850  {
1851  	return thread_deallocate((thread_t)thread_inspect);
1852  }
1853  
1854  // </copied>
1855  
1856  // <copied from="xnu://7195.141.2/osfmk/kern/syscall_subr.c">
1857  
1858  void
1859  thread_handoff_parameter(thread_t thread, thread_continue_t continuation,
1860      void *parameter, thread_handoff_option_t option)
1861  {
1862  	thread_handoff_internal(thread, continuation, parameter, option);
1863  	panic("NULL continuation passed to %s", __func__);
1864  	__builtin_unreachable();
1865  }
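
      // Annotation: thread_handoff_internal() only returns to its caller when
      // it was handed a NULL continuation; with a valid continuation it
      // context-switches away, so reaching the panic above means the caller
      // passed NULL.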
1866  
1867  wait_result_t
1868  thread_handoff_deallocate(thread_t thread, thread_handoff_option_t option)
1869  {
1870  	return thread_handoff_internal(thread, NULL, NULL, option);
1871  }
1872  
1873  // </copied>
1874  
1875  // <copied from="xnu://7195.141.2/osfmk/kern/thread_act.c">
1876  
1877  /*
1878   * Internal routine to mark a thread as waiting
1879   * right after it has been created.  The caller
1880   * is responsible for calling wakeup()/thread_wakeup()
1881   * or thread_terminate() to get it going.
1882   *
1883   * Always called with the thread mutex locked.
1884   *
1885   * Task and task_threads mutexes also held
1886   * (so nobody can set the thread running before
1887   * this point)
1888   *
1889   * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
1890   * to allow termination from this point forward.
1891   */
1892  void
1893  thread_start_in_assert_wait(
1894  	thread_t                        thread,
1895  	event_t             event,
1896  	wait_interrupt_t    interruptible)
1897  {
1898  	struct waitq *waitq = assert_wait_queue(event);
1899  	wait_result_t wait_result;
1900  	spl_t spl;
1901  
1902  	spl = splsched();
1903  	waitq_lock(waitq);
1904  
1905  	/* clear out startup condition (safe because thread not started yet) */
1906  	thread_lock(thread);
1907  	assert(!thread->started);
1908  	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
1909  	thread->state &= ~(TH_WAIT | TH_UNINT);
1910  	thread_unlock(thread);
1911  
1912  	/* assert wait interruptibly forever */
1913  	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1914  	    interruptible,
1915  	    TIMEOUT_URGENCY_SYS_NORMAL,
1916  	    TIMEOUT_WAIT_FOREVER,
1917  	    TIMEOUT_NO_LEEWAY,
1918  	    thread);
1919  	assert(wait_result == THREAD_WAITING);
1920  
1921  	/* mark thread started while we still hold the waitq lock */
1922  	thread_lock(thread);
1923  	thread->started = TRUE;
1924  	thread_unlock(thread);
1925  
1926  	waitq_unlock(waitq);
1927  	splx(spl);
1928  }
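
      // Annotation: the startup state is cleared and the wait re-asserted
      // while the waitq lock is held throughout, so no wakeup can slip in
      // between the two steps and the thread cannot run before it is marked
      // started.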
1929  
1930  void
1931  thread_start(
1932  	thread_t                        thread)
1933  {
1934  	clear_wait(thread, THREAD_AWAKENED);
1935  	thread->started = TRUE;
1936  }
1937  
1938  kern_return_t
1939  thread_get_state_to_user(
1940  	thread_t                thread,
1941  	int                                             flavor,
1942  	thread_state_t                  state,                  /* pointer to OUT array */
1943  	mach_msg_type_number_t  *state_count)   /* IN/OUT */
1944  {
1945  	return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
1946  }
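
      // Annotation: the TRUE argument asks thread_get_state_internal() to
      // convert/sanitize the state for export to userspace, as opposed to
      // returning the raw in-kernel representation.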
1947  
1948  kern_return_t
1949  thread_suspend(thread_t thread)
1950  {
1951  	kern_return_t result = KERN_SUCCESS;
1952  
1953  	if (thread == THREAD_NULL || thread->task == kernel_task) {
1954  		return KERN_INVALID_ARGUMENT;
1955  	}
1956  
1957  	thread_mtx_lock(thread);
1958  
1959  	if (thread->active) {
1960  		if (thread->user_stop_count++ == 0) {
1961  			thread_hold(thread);
1962  		}
1963  	} else {
1964  		result = KERN_TERMINATED;
1965  	}
1966  
1967  	thread_mtx_unlock(thread);
1968  
1969  	if (thread != current_thread() && result == KERN_SUCCESS) {
1970  		thread_wait(thread, FALSE);
1971  	}
1972  
1973  	return result;
1974  }
1975  
1976  kern_return_t
1977  thread_resume(thread_t thread)
1978  {
1979  	kern_return_t result = KERN_SUCCESS;
1980  
1981  	if (thread == THREAD_NULL || thread->task == kernel_task) {
1982  		return KERN_INVALID_ARGUMENT;
1983  	}
1984  
1985  	thread_mtx_lock(thread);
1986  
1987  	if (thread->active) {
1988  		if (thread->user_stop_count > 0) {
1989  			if (--thread->user_stop_count == 0) {
1990  				thread_release(thread);
1991  			}
1992  		} else {
1993  			result = KERN_FAILURE;
1994  		}
1995  	} else {
1996  		result = KERN_TERMINATED;
1997  	}
1998  
1999  	thread_mtx_unlock(thread);
2000  
2001  	return result;
2002  }
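
      // Annotation (not from XNU): suspensions nest via user_stop_count, so
      // every thread_suspend() must be balanced by one thread_resume().
      // `t` is a hypothetical thread reference.
      #if 0
      	thread_suspend(t);   /* count 0 -> 1: thread held */
      	thread_suspend(t);   /* count 1 -> 2: still held */
      	thread_resume(t);    /* count 2 -> 1: still held */
      	thread_resume(t);    /* count 1 -> 0: released, runnable again */
      #endif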
2003  
2004  // </copied>
2005  
2006  // <copied from="xnu://7195.141.2/bsd/uxkern/ux_exception.c">
2007  
2008  /*
2009   * Translate Mach exceptions to UNIX signals.
2010   *
2011   * ux_exception translates a Mach exception, code, and subcode to
2012   * a signal.  It calls machine_exception() (machine-dependent)
2013   * to attempt the translation first.
2014   */
2015  #ifdef __DARLING__
2016  int
2017  #else
2018  static int
2019  #endif
2020  ux_exception(int                        exception,
2021      mach_exception_code_t      code,
2022      mach_exception_subcode_t   subcode)
2023  {
2024  	int machine_signal = 0;
2025  
2026  #ifndef __DARLING__
2027  	/* Try machine-dependent translation first. */
2028  	if ((machine_signal = machine_exception(exception, code, subcode)) != 0) {
2029  		return machine_signal;
2030  	}
2031  #endif
2032  
2033  	switch (exception) {
2034  	case EXC_BAD_ACCESS:
2035  		if (code == KERN_INVALID_ADDRESS) {
2036  			return SIGSEGV;
2037  		} else {
2038  			return SIGBUS;
2039  		}
2040  
2041  	case EXC_BAD_INSTRUCTION:
2042  		return SIGILL;
2043  
2044  	case EXC_ARITHMETIC:
2045  		return SIGFPE;
2046  
2047  #ifndef __DARLING__
2048  	case EXC_EMULATION:
2049  		return SIGEMT;
2050  #endif
2051  
2052  	case EXC_SOFTWARE:
2053  		switch (code) {
2054  		case EXC_UNIX_BAD_SYSCALL:
2055  			return SIGSYS;
2056  		case EXC_UNIX_BAD_PIPE:
2057  			return SIGPIPE;
2058  		case EXC_UNIX_ABORT:
2059  			return SIGABRT;
2060  		case EXC_SOFT_SIGNAL:
2061  			return SIGKILL;
2062  		}
2063  		break;
2064  
2065  	case EXC_BREAKPOINT:
2066  		return SIGTRAP;
2067  	}
2068  
2069  	return 0;
2070  }
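
      // Annotation: examples of the mapping above: (EXC_BAD_ACCESS,
      // KERN_INVALID_ADDRESS) -> SIGSEGV, any other EXC_BAD_ACCESS code ->
      // SIGBUS, and (EXC_SOFTWARE, EXC_SOFT_SIGNAL) -> SIGKILL; a return of
      // 0 means no signal is delivered for the exception.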
2071  
2072  // </copied>