/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	arm64/cpu.c
 *
 *	cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION */


#include <libkern/section_keywords.h>

extern boolean_t        idle_enable;
extern uint64_t         wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif


extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep);
extern void arm64_force_wfi_clock_gate(void);
#if defined(APPLETYPHOON)
// <rdar://problem/15827409>
extern void typhoon_prepare_for_wfi(void);
extern void typhoon_return_from_wfi(void);
#endif

#if HAS_RETENTION_STATE
extern void arm64_retention_wfi(void);
#endif

vm_address_t   start_cpu_paddr;

sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};


// wfi - wfi mode
//  0 : disabled
//  1 : normal
//  2 : overhead simulation (delay & flags)
static int wfi = 1;

#if DEVELOPMENT || DEBUG

// wfi_flags
//  1 << 0 : flush L1s
//  1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */
#if DEVELOPMENT || DEBUG
static bool idle_proximate_timer_wfe = true;
static bool idle_proximate_io_wfe = true;
#define CPUPM_IDLE_WFE 0x5310300
#else
static const bool idle_proximate_timer_wfe = true;
static const bool idle_proximate_io_wfe = true;
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
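/*
 * Read back to front, the signature bytes spell "SUSPMOSX" and "RUNNMOSX"
 * respectively.
 */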
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);
void arm64_immediate_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

void
arm64_immediate_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
}

uint64_t arm64_ipi_test_data[MAX_CPUS * 2];

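/*
 * Cross-call self-test: send both a deferred and an immediate IPI to every
 * CPU.  Each callback writes a CPU-unique value into arm64_ipi_test_data;
 * the initiating CPU polls for those values and panics on timeout.
 */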
void
arm64_ipi_test(void)
{
	volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
	uint32_t timeout_ms = 100;
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * Most systems will only take this path with the cpus=1 boot-arg,
	 * but either way, with only one active CPU there is no one to IPI.
	 */
	if (real_ncpus == 1) {
		return;
	}

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	for (unsigned int i = 0; i <= max_cpu_id; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		/* Establish the timeout baseline before the retry loop below. */
		then = mach_absolute_time();

		while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
		    (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d was unable to immediate-IPI CPU %u within %dms", current_cpu_number, i, timeout_ms);
			}
		}

		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %u but didn't get correct responses within %dms, responses: %llx, %llx",
				    current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
			}
		}
	}
}
#endif /* defined(CONFIG_XNUPOST) */

static void
configure_coresight_registers(cpu_data_t *cdp)
{
	int             i;

	assert(cdp);
	vm_offset_t     coresight_regs = ml_get_topology_info()->cpus[cdp->cpu_number].coresight_regs;

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide either cpu_regmap_paddr (from the legacy "reg-private" EDT property)
	 * or coresight_regs (from the new "coresight-reg" property), assume that
	 * coresight registers are not supported.
	 */
	if (cdp->cpu_regmap_paddr || coresight_regs) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				if (coresight_regs) {
					cdp->coresight_base[i] = coresight_regs + CORESIGHT_OFFSET(i);
				} else {
					uint64_t addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
					cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
				}

				/*
				 * At this point, failing to io map the
				 * registers is considered an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}


/*
 *	Routine:	cpu_bootstrap
 *	Function:	Early CPU bootstrap.  Nothing is required on arm64;
 *			this stub satisfies the machine-independent interface.
 */
void
cpu_bootstrap(void)
{
}

/*
 *	Routine:	cpu_sleep
 *	Function:	Prepare the current CPU for sleep/powerdown and hand
 *			control to the platform layer; does not return.
 */
void
cpu_sleep(void)
{
	cpu_data_t     *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;
#if KPC
	kpc_idle();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

	CleanPoC_Dcache();

#if USE_APPLEARMSMP
	if (ml_is_quiescing()) {
		PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
	} else {
		bool deep_sleep = PE_cpu_down(cpu_data_ptr->cpu_id);
		cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
		// hang CPU on spurious wakeup
		cpu_data_ptr->cpu_reset_handler = (uintptr_t)0;
		__builtin_arm_dsb(DSB_ISH);
		CleanPoU_Dcache();
		arm64_prepare_for_sleep(deep_sleep);
	}
#else
	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
#endif
	/*NOTREACHED*/
}

/*
 *	Routine:	cpu_interrupt_is_pending
 *	Function:	Returns the value of ISR.  Due to how this register is
 *			implemented, this returns 0 if there are no interrupts
 *			pending, so it can be used as a boolean test.
 */
int
cpu_interrupt_is_pending(void)
{
	uint64_t isr_value;
	isr_value = __builtin_arm_rsr64("ISR_EL1");
	return (int)isr_value;
}

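/*
 * Returns true when the next timer deadline is too close to arm an idle
 * timer "pop" (SetIdlePop() declines), i.e. a timer interrupt is imminent.
 */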
static bool
cpu_proximate_timer(void)
{
	return !SetIdlePop();
}

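/*
 * Spin in WFE until an interrupt is pending or, when wfe_deadline is not
 * ~0ULL, until the cluster's WFE recommendation is withdrawn or the deadline
 * passes.  Each WFE wakeup is bounded by the timer event stream.  Returns
 * whether an interrupt was pending when the loop exited.
 */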
static bool
wfe_to_deadline_or_interrupt(uint32_t cid, uint64_t wfe_deadline, __unused cpu_data_t *cdp)
{
	bool ipending = false;
	while ((ipending = (cpu_interrupt_is_pending() != 0)) == false) {
		/* Assumes the event stream is enabled.
		 * TODO: evaluate temporarily stretching the per-CPU event
		 * interval to a larger value for possible efficiency
		 * improvements.
		 */
		__builtin_arm_wfe();
#if DEVELOPMENT || DEBUG
		cdp->wfe_count++;
#endif
		if (wfe_deadline != ~0ULL) {
#if DEVELOPMENT || DEBUG
			cdp->wfe_deadline_checks++;
#endif
			/* Check if the WFE recommendation has expired.
			 * We do not recompute the deadline here.
			 */
			if ((ml_cluster_wfe_timeout(cid) == 0) ||
			    mach_absolute_time() >= wfe_deadline) {
#if DEVELOPMENT || DEBUG
				cdp->wfe_terminations++;
#endif
				break;
			}
		}
	}
	/* TODO: worth refreshing pending interrupt status? */
	return ipending;
}

/*
 *	Routine:	cpu_idle
 *	Function:	Low-level idle entry point; issues WFE/WFI as
 *			appropriate and returns to the idle loop via
 *			Idle_load_context().
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t     *cpu_data_ptr = getCpuDatap();
	uint64_t        new_idle_timeout_ticks = 0x0ULL, lastPop;
	bool idle_disallowed = false;

	if (__improbable(!idle_enable)) {
		idle_disallowed = true;
	} else if (__improbable(cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		idle_disallowed = true;
	}

	if (__improbable(idle_disallowed)) {
		Idle_load_context();
	}

	bool ipending = false;
	uint32_t cid = ~0U;

	if (__probable(idle_proximate_io_wfe == true)) {
		uint64_t wfe_deadline = 0;
		/* Check for an active, performance-controller-generated
		 * WFE recommendation for this cluster.
		 */
		cid = cpu_data_ptr->cpu_cluster_id;
		uint64_t wfe_ttd = 0;
		if ((wfe_ttd = ml_cluster_wfe_timeout(cid)) != 0) {
			wfe_deadline = mach_absolute_time() + wfe_ttd;
		}

		if (wfe_deadline != 0) {
			/* Poll issuing event-bounded WFEs until an interrupt
			 * arrives or the WFE recommendation expires.
			 */
			ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cpu_data_ptr);
#if DEVELOPMENT || DEBUG
			KDBG(CPUPM_IDLE_WFE, ipending, cpu_data_ptr->wfe_count, wfe_deadline, 0);
#endif
			if (ipending == true) {
				/* Back to machine_idle() */
				Idle_load_context();
			}
		}
	}

	if (__improbable(cpu_proximate_timer())) {
		if (idle_proximate_timer_wfe == true) {
			/* Poll issuing WFEs until the expected
			 * timer FIQ arrives.
			 */
			ipending = wfe_to_deadline_or_interrupt(cid, ~0ULL, cpu_data_ptr);
			assert(ipending == true);
		}
		Idle_load_context();
	}

	lastPop = cpu_data_ptr->rtcPop;

	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug) {
		arm_debug_set(NULL);
	}
	cpu_data_ptr->cpu_user_debug = NULL;

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if KPC
	kpc_idle();
#endif
#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_enter();
#endif

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
#if HAS_RETENTION_STATE
		arm64_retention_wfi();
#else
		__builtin_arm_wfi();
#endif

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_exit();
#endif
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}

/*
 *	Routine:	cpu_idle_exit
 *	Function:	Complete the transition out of idle: notify the idle
 *			handlers, re-arm the idle timer if requested, and
 *			return to the idle loop.
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t        new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t     *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if KPC
	kpc_idle_exit();
#endif

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}

void
cpu_init(void)
{
	cpu_data_t     *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		case CPU_ARCH_ARMv8E:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64E;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
#if MONOTONIC
	cdp->cpu_stat.pmi_cnt_wake = 0;
#endif /* MONOTONIC */
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if KPC
	kpc_idle_exit();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}

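/*
 * Allocate the per-CPU interrupt and exception stacks.  Each allocation
 * reserves a guard page at both ends (hence the 2 * PAGE_SIZE padding and
 * the PAGE_SIZE offset to the usable stack).
 */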
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t             irq_stack = 0;
	vm_offset_t             exc_stack = 0;

	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
	    INTSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu interrupt stack\n");
	}

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kr = kernel_memory_allocate(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu exception stack\n");
	}

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}

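/*
 * Release a non-boot CPU's stacks and unpublish its cpu_data entry.
 * kfree is called through parentheses, presumably to bypass a
 * function-like kfree macro and call the function directly.
 */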
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
		return;
	}

	int cpu_number = cpu_data_ptr->cpu_number;

	if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) {
		CpuDataEntries[cpu_number].cpu_data_vaddr = NULL;
		CpuDataEntries[cpu_number].cpu_data_paddr = 0;
		__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
	}
	(kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	(kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
}

void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = NULL;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;


	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = NULL;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

#if !XNU_MONITOR
	pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
	pmap_cpu_data_ptr->pv_free.list = NULL;
	pmap_cpu_data_ptr->pv_free.count = 0;
	pmap_cpu_data_ptr->pv_free_tail = NULL;

	bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));
#endif
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_APPLE_PAC)
	cpu_data_ptr->rop_key = 0;
	cpu_data_ptr->jop_key = ml_default_jop_pid();
#endif
}

kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int     cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}

#if defined(KERNEL_INTEGRITY_CTRR)
/* Hibernation needs to reset this state, so data and text are in the hib segment;
 * this allows them to be accessed and executed early.
 */
LCK_GRP_DECLARE(ctrr_cpu_start_lock_grp, "ctrr_cpu_start_lock");
LCK_SPIN_DECLARE(ctrr_cpu_start_lck, &ctrr_cpu_start_lock_grp);
enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS] MARK_AS_HIBERNATE_DATA;

MARK_AS_HIBERNATE_TEXT
void
init_ctrr_cluster_states(void)
{
	for (int i = 0; i < MAX_CPU_CLUSTERS; i++) {
		ctrr_cluster_locked[i] = CTRR_UNLOCKED;
	}
}
#endif

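/*
 *	Routine:	cpu_start
 *	Function:	Bring a CPU online.  For the calling CPU this just
 *			finishes machine initialization; for any other CPU it
 *			installs the reset handler and first thread, then asks
 *			the platform layer to release the core from reset.
 */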
kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;
		processor_t processor;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

#if !XNU_MONITOR
		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
#endif

		processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);
		if (processor->startup_thread != THREAD_NULL) {
			first_thread = processor->startup_thread;
		} else {
			first_thread = processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;
		first_thread->machine.pcpu_data_base =
		    (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data);

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
#if defined(KERNEL_INTEGRITY_CTRR)

		/* The first CPU started within a cluster locks CTRR for that
		 * cluster; other CPUs block until the cluster is locked. */
		lck_spin_lock(&ctrr_cpu_start_lck);
		switch (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]) {
		case CTRR_UNLOCKED:
			ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKING;
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		case CTRR_LOCKING:
			assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT);
			lck_spin_unlock(&ctrr_cpu_start_lck);
			thread_block(THREAD_CONTINUE_NULL);
			assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKING);
			break;
		default:         // CTRR_LOCKED
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		}
#endif
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}


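/*
 *	Routine:	cpu_timebase_init
 *	Function:	Install the platform timebase accessors on this CPU and
 *			(re)establish the kernel's timebase offset across boot
 *			and wake.
 */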
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase.  It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	} else if (from_boot) {
		/* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
		ml_set_reset_time(ml_get_hwclock());
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}

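/*
 *	Routine:	ml_arm_sleep
 *	Function:	Final common sleep path.  The boot CPU waits for all
 *			other CPUs to reach the sleep path, records the wake
 *			timebase, and (if enabled) writes the hibernation
 *			image; secondary CPUs stall until the boot CPU
 *			releases them.  Does not return.
 */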
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t              *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t      *target_cdp;
		int             cpu;
		int             max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
		ml_set_reset_time(UINT64_MAX);
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector, which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 *      Grab the debug lock via EDLAR and clear bit 0 of EDPRCR
		 *      to tell the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if HIBERNATION
		uint32_t mode = hibernate_write_image();
		if (mode == kIOHibernatePostWriteHalt) {
			HIBLOG("powering off after writing hibernation image\n");
			int halt_result = -1;
			if (PE_halt_restart) {
				halt_result = (*PE_halt_restart)(kPEHaltCPU);
			}
			panic("can't shutdown: PE_halt_restart returned %d", halt_result);
		}
#endif /* HIBERNATION */

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep(true);
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With few exceptions, our ARM64 CPUs have a global register to
		 * manage entering deep sleep, as opposed to a per-CPU
		 * register.  We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 *      Grab the debug lock via EDLAR and clear bit 0 of EDPRCR
		 *      to tell the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep(true);
	}
}
1031  
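/*
 *	Routine:	cpu_machine_idle_init
 *	Function:	One-time (from_boot) parsing of idle-related boot-args
 *			and reset-handler setup, plus per-CPU programming of
 *			the idle resume handler.
 */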
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t     resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t              *cpu_data_ptr   = getCpuDatap();

	if (from_boot) {
		int             wfi_tmp = 1;
		uint32_t        production = 1;
		DTEntry         entry;

		unsigned long   jtag = 0;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

#if DEVELOPMENT || DEBUG
		uint32_t wfe_mode = 0;
		if (PE_parse_boot_argn("wfe_mode", &wfe_mode, sizeof(wfe_mode))) {
			idle_proximate_timer_wfe = ((wfe_mode & 1) == 1);
			idle_proximate_io_wfe = ((wfe_mode & 2) == 2);
		}
#endif
		PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp));

		// bits 7..0 give the wfi type
		switch (wfi_tmp & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			// 31..16 - wfi delay (scaled by NSEC_PER_MSEC below)
			// 15..8  - flags
			// 7..0   - 2
			wfi = 2;
			wfi_flags = (wfi_tmp >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}
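		/*
		 * Illustrative example (not from the source): on a DEVELOPMENT
		 * kernel, booting with wfi=0x00640202 selects type 2 (overhead
		 * simulation) with wfi_flags 0x02 (flush TLBs) and a delay
		 * field of 0x0064 == 100, scaled by NSEC_PER_MSEC above.
		 */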

		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on a production or debug chip
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int    size;
			void const      *prop;

			if (kSuccess == SecureDTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
				if (size == 4) {
					bcopy(prop, &production, size);
				}
			}
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
#endif

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

_Atomic uint32_t cpu_idle_count = 0;

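/* Track how many CPUs are currently executing in the platform idle loop. */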
void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		os_atomic_inc(&cpu_idle_count, relaxed);
	} else {
		os_atomic_dec(&cpu_idle_count, relaxed);
	}
}

#if WITH_CLASSIC_S2R
void
sleep_token_buffer_init(void)
{
	cpu_data_t      *cpu_data_ptr = getCpuDatap();
	DTEntry         entry;
	size_t          size;
	void const * const *prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the "stram" node in the device tree */
		if (kSuccess != SecureDTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != SecureDTGetProperty(entry, "reg", (const void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t const *)prop)[0], ((vm_size_t const *)prop)[1]);
	}
}
#endif