/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

extern int debug_task;
extern bool need_wa_rdar_55577508;

/* zones for the debug_state and user save state areas */
ZONE_DECLARE(ads_zone, "arm debug state", sizeof(arm_debug_state_t), ZC_NONE);
ZONE_DECLARE(user_ss_zone, "user save state", sizeof(arm_context_t), ZC_NONE);

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}

static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base  = old->machine.pcpu_data_base;

	/* TODO: Should this be ordered? */

	old->machine.CpuDatap = NULL;
	old->machine.pcpu_data_base = 0;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base = base;
}

/**
 * routine: machine_switch_pmap_and_extended_context
 *
 * Helper function used by machine_switch_context and machine_stack_handoff to switch the
 * extended context and switch the pmap if necessary.
 *
 */

static inline void
machine_switch_pmap_and_extended_context(thread_t old, thread_t new)
{
	pmap_t new_pmap;

	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap) {
		pmap_switch(new_pmap);
	} else {
		/*
		 * If the thread is preempted while performing cache or TLB maintenance,
		 * it may be migrated to a different CPU between the completion of the relevant
		 * maintenance instruction and the synchronizing DSB. ARM requires that the
		 * synchronizing DSB be issued *on the PE that issued the maintenance instruction*
		 * in order to guarantee completion of the instruction and visibility of its effects.
		 * Issue DSB here to enforce that guarantee. We only do this for the case in which
		 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
		 * TTBR0. Note also that cache maintenance may be performed in userspace, so we
		 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
		 * a pending kernel TLB or cache maintenance instruction.
		 */
		__builtin_arm_dsb(DSB_ISH);
	}

	machine_thread_switch_cpu_data(old, new);
}

/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
	thread_t retval;

#if __ARM_PAN_AVAILABLE__
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("context switch with PAN disabled");
	}
#endif

#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	if (old == new) {
		panic("machine_switch_context");
	}

	kpc_off_cpu(old);

	machine_switch_pmap_and_extended_context(old, new);

	machine_switch_context_kprintf("old = %p continuation = %p new = %p\n", old, continuation, new);

	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}

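/*
 * Returns whether the thread currently owns per-CPU data, i.e. whether it
 * is on a core right now.
 */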
boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.CpuDatap != NULL;
}

/*
 * Routine: machine_thread_create
 *
 */
kern_return_t
machine_thread_create(thread_t thread,
    task_t task)
{
	arm_context_t *thread_user_ss = NULL;
	kern_return_t result = KERN_SUCCESS;

#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %p\n", thread);

	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset will cause any attempt to use it to panic
		thread->machine.pcpu_data_base = (vm_offset_t)VM_MIN_KERNEL_ADDRESS;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.kpcb = NULL;
	thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.jop_pid = task->jop_pid;
	thread->machine.disable_user_jop = task->disable_user_jop;
#endif

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);

		if (!thread->machine.contextData) {
			result = KERN_FAILURE;
			goto done;
		}

		thread->machine.upcb = &thread->machine.contextData->ss;
		thread->machine.uNeon = &thread->machine.contextData->ns;

		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	result = machine_thread_state_initialize(thread);

done:
	if (result != KERN_SUCCESS) {
		thread_user_ss = thread->machine.contextData;

		if (thread_user_ss) {
			thread->machine.upcb = NULL;
			thread->machine.uNeon = NULL;
			thread->machine.contextData = NULL;
			zfree(user_ss_zone, thread_user_ss);
		}
	}

	return result;
}

/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;

		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		zfree(ads_zone, thread->machine.DebugData);
	}
}

/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
}

/*
 * Routine:	machine_thread_template_init
 *
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}

/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr(void)
{
	return get_saved_state_pc(current_thread()->machine.upcb);
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return stack;
}

/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
	struct arm_kernel_context *context;
	struct arm_kernel_saved_state *savestate;
	struct arm_kernel_neon_saved_state *neon_savestate;
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = &context->ss;
	savestate->fp = 0;
	savestate->sp = thread->machine.kstackptr;

	/*
	 * The PC and CPSR of the kernel stack saved state are never used by context switch
	 * code, and should never be used on exception return either. We're going to poison
	 * these values to ensure they never get copied to the exception frame and used to
	 * hijack control flow or privilege level on exception return.
	 */

	const uint32_t default_cpsr = PSR64_KERNEL_POISON;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	boolean_t intr = ml_set_interrupts_enabled(FALSE);
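	/*
	 * Build the frame in place: zero pc, poison cpsr, point lr at
	 * thread_continue, zero x16/x17, then call ml_sign_kernel_thread_state
	 * to sign the result.  Interrupts are disabled across the sequence,
	 * presumably so the frame is never observable in a partially
	 * initialized, unsigned state.
	 */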
	asm volatile (
                "mov	x0, %[ss]"                              "\n"

                "mov	x1, xzr"                                "\n"
                "str	x1, [x0, %[SS64_PC]]"                   "\n"

                "mov	x2, %[default_cpsr_lo]"                 "\n"
                "movk	x2, %[default_cpsr_hi], lsl #16"        "\n"
                "str	w2, [x0, %[SS64_CPSR]]"                 "\n"

                "adrp	x3, _thread_continue@page"              "\n"
                "add	x3, x3, _thread_continue@pageoff"       "\n"
                "str	x3, [x0, %[SS64_LR]]"                   "\n"

                "mov	x4, xzr"                                "\n"
                "mov	x5, xzr"                                "\n"
                "stp	x4, x5, [x0, %[SS64_X16]]"              "\n"

                "mov	x6, lr"                                 "\n"
                "bl	_ml_sign_kernel_thread_state"                   "\n"
                "mov	lr, x6"                                 "\n"
                :
                : [ss]                  "r"(&context->ss),
                  [default_cpsr_lo]     "M"(default_cpsr & 0xFFFF),
                  [default_cpsr_hi]     "M"(default_cpsr >> 16),
                  [SS64_X16]            "i"(offsetof(struct arm_kernel_saved_state, x[0])),
                  [SS64_PC]             "i"(offsetof(struct arm_kernel_saved_state, pc)),
                  [SS64_CPSR]           "i"(offsetof(struct arm_kernel_saved_state, cpsr)),
                  [SS64_LR]             "i"(offsetof(struct arm_kernel_saved_state, lr))
                : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
        );
	ml_set_interrupts_enabled(intr);
#else
	savestate->lr = (uintptr_t)thread_continue;
	savestate->cpsr = default_cpsr;
	savestate->pc = 0;
#endif /* defined(HAS_APPLE_PAC) */
	neon_savestate = &context->ns;
	neon_savestate->fpcr = FPCR_DEFAULT;
	machine_stack_attach_kprintf("thread = %p lr = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}

/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
	vm_offset_t  stack;

#if __ARM_PAN_AVAILABLE__
	if (__improbable(__builtin_arm_rsr("pan") == 0)) {
		panic("stack handoff with PAN disabled");
	}
#endif

	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
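	/*
	 * If the stack being handed off was the old thread's reserved stack,
	 * swap reservations: the old thread takes over the new thread's
	 * reserved stack, and the handed-off stack becomes the new thread's
	 * reserve.
	 */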
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	machine_switch_pmap_and_extended_context(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}

/*
 * Routine: call_continuation
 *
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation: " x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}

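/*
 * Write a breakpoint/watchpoint value (xVR) or control (xCR) register.
 * The control-register macros additionally OR the written value into an
 * accumulator, so that after programming every implemented pair the caller
 * can tell whether any control register was left enabled.
 */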
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGBCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
	        "msr DBGWCR" #n "_EL1, %[val]\n" \
	        "orr %[result], %[result], %[val]\n" \
	        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))

void
arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	uint64_t           all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

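	/*
	 * Likewise for the implemented watchpoint pairs.
	 */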
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);
}

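/*
 * 64-bit analogue of arm_debug_set32.  The programming sequence is the
 * same; only here the 64-bit value registers are used without widening.
 */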
void
arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data *  cpu_data_ptr;
	arm_debug_info_t * debug_info    = arm_debug_info();
	boolean_t          intr;
	arm_debug_state_t  off_state;
	uint64_t           all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
		OS_FALLTHROUGH;
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
		OS_FALLTHROUGH;
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
		OS_FALLTHROUGH;
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
		OS_FALLTHROUGH;
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
		OS_FALLTHROUGH;
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
		OS_FALLTHROUGH;
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
		OS_FALLTHROUGH;
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
		OS_FALLTHROUGH;
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
		OS_FALLTHROUGH;
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
		OS_FALLTHROUGH;
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
		OS_FALLTHROUGH;
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
		OS_FALLTHROUGH;
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
		OS_FALLTHROUGH;
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
		OS_FALLTHROUGH;
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
		OS_FALLTHROUGH;
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
		OS_FALLTHROUGH;
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (all_ctrls != 0) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);
}

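/*
 * Install a user debug state on the current CPU, dispatching on the
 * state's flavor.  A NULL state clears the debug registers; the data width
 * of the current thread selects which clearing path runs.
 */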
void
arm_debug_set(arm_debug_state_t *debug_state)
{
	if (debug_state) {
		switch (debug_state->dsh.flavor) {
		case ARM_DEBUG_STATE32:
			arm_debug_set32(debug_state);
			break;
		case ARM_DEBUG_STATE64:
			arm_debug_set64(debug_state);
			break;
		default:
			panic("arm_debug_set");
			break;
		}
	} else {
		if (thread_is_64bit_data(current_thread())) {
			arm_debug_set64(debug_state);
		} else {
			arm_debug_set32(debug_state);
		}
	}
}

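/*
 * A debug state is valid only when every enabled breakpoint or watchpoint
 * (i.e. one with a non-zero control register) targets an address within
 * the user-addressable range.
 */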
#define VM_MAX_ADDRESS32          ((vm_address_t) 0x80000000)
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
			return FALSE;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
			return FALSE;
		}
	}
	return TRUE;
}

boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
			return FALSE;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
			return FALSE;
		}
	}
	return TRUE;
}

boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
			return FALSE;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
			return FALSE;
		}
	}
	return TRUE;
}

/*
 * Duplicate one arm_debug_state_t to another.  "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,
    arm_legacy_debug_state_t * target,
    __unused boolean_t         all)
{
	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}

void
copy_debug_state32(arm_debug_state32_t * src,
    arm_debug_state32_t * target,
    __unused boolean_t    all)
{
	bcopy(src, target, sizeof(arm_debug_state32_t));
}

void
copy_debug_state64(arm_debug_state64_t * src,
    arm_debug_state64_t * target,
    __unused boolean_t    all)
{
	bcopy(src, target, sizeof(arm_debug_state64_t));
}

kern_return_t
machine_thread_set_tsd_base(thread_t         thread,
    mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base & MACHDEP_CPUNUM_MASK) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread_is_64bit_addr(thread)) {
		if (tsd_base > vm_map_max(thread->map)) {
			tsd_base = 0ULL;
		}
	} else {
		if (tsd_base > UINT32_MAX) {
			tsd_base = 0ULL;
		}
	}

	thread->machine.cthread_self = tsd_base;

	/* For current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		uint64_t cpunum, tpidrro_el0;

		mp_disable_preemption();
		tpidrro_el0 = get_tpidrro();
		cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
		set_tpidrro(tsd_base | cpunum);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}
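
/*
 * TPIDRRO_EL0 packs the TSD base together with the CPU number in the low
 * MACHDEP_CPUNUM_MASK bits, so a reader recovers the base by masking those
 * bits off.  A minimal userspace sketch (assuming the same mask layout is
 * visible to userspace):
 *
 *	uint64_t tpidrro;
 *	__asm__ ("mrs %0, TPIDRRO_EL0" : "=r"(tpidrro));
 *	void *tsd_base = (void *)(tpidrro & ~(uint64_t)MACHDEP_CPUNUM_MASK);
 */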

void
machine_tecs(__unused thread_t thr)
{
}

int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}

#if __ARM_ARCH_8_5__
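/*
 * Request that the next context switch on this CPU perform extra
 * synchronization, presumably consumed by the context-switch path via the
 * sync_on_cswitch flag.
 */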
void
arm_context_switch_requires_sync(void)
{
	current_cpu_datap()->sync_on_cswitch = 1;
}
#endif

#if __has_feature(ptrauth_calls)
boolean_t
arm_user_jop_disabled(void)
{
	return FALSE;
}
#endif /* __has_feature(ptrauth_calls) */