/*
 * Copyright (c) 2012-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/cpu_number.h>
#include <kern/kpc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <stdint.h>
#include <sys/errno.h>

#if APPLE_ARM64_ARCH_FAMILY

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

void kpc_pmi_handler(unsigned int ctr);

/*
 * PMCs 8 and 9 were added in Hurricane; to maintain the existing bit
 * positions of the other PMCs, their configuration bits start at position 32.
 */
#define PMCR_PMC_8_9_OFFSET     (32)
#define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
#define PMCR_PMC_SHIFT(PMC)     (((PMC) <= 7) ? (PMC) : \
	                          PMCR_PMC_8_9_SHIFT(PMC))
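
/*
 * For illustration: PMCR_PMC_SHIFT(3) == 3, while PMCR_PMC_SHIFT(8) == 32 and
 * PMCR_PMC_SHIFT(9) == 33, since the configuration bits for PMCs 8 and 9 live
 * in the upper half of the register.
 */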

/*
 * PMCR0 controls enabling, interrupts, and overflow of performance counters.
 */

/* PMC is enabled */
#define PMCR0_PMC_ENABLE_MASK(PMC)  (UINT64_C(0x1) << PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMC_DISABLE_MASK(PMC) (~PMCR0_PMC_ENABLE_MASK(PMC))

/* overflow on a PMC generates an interrupt */
#define PMCR0_PMI_OFFSET            (12)
#define PMCR0_PMI_SHIFT(PMC)        (PMCR0_PMI_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMI_ENABLE_MASK(PMC)  (UINT64_C(1) << PMCR0_PMI_SHIFT(PMC))
#define PMCR0_PMI_DISABLE_MASK(PMC) (~PMCR0_PMI_ENABLE_MASK(PMC))

/* disable counting when a PMI is signaled (except for AIC interrupts) */
#define PMCR0_DISCNT_SHIFT        (20)
#define PMCR0_DISCNT_ENABLE_MASK  (UINT64_C(1) << PMCR0_DISCNT_SHIFT)
#define PMCR0_DISCNT_DISABLE_MASK (~PMCR0_DISCNT_ENABLE_MASK)

/* 21 unused */

/* block PMIs until ERET retires */
#define PMCR0_WFRFE_SHIFT        (22)
#define PMCR0_WFRFE_ENABLE_MASK  (UINT64_C(1) << PMCR0_WFRFE_SHIFT)
#define PMCR0_WFRFE_DISABLE_MASK (~PMCR0_WFRFE_ENABLE_MASK)

/* count global L2C events */
#define PMCR0_L2CGLOBAL_SHIFT        (23)
#define PMCR0_L2CGLOBAL_ENABLE_MASK  (UINT64_C(1) << PMCR0_L2CGLOBAL_SHIFT)
#define PMCR0_L2CGLOBAL_DISABLE_MASK (~PMCR0_L2CGLOBAL_ENABLE_MASK)

/* allow user mode access to configuration registers */
#define PMCR0_USEREN_SHIFT        (30)
#define PMCR0_USEREN_ENABLE_MASK  (UINT64_C(1) << PMCR0_USEREN_SHIFT)
#define PMCR0_USEREN_DISABLE_MASK (~PMCR0_USEREN_ENABLE_MASK)

/* force the CPMU clocks in case of a clocking bug */
#define PMCR0_CLKEN_SHIFT        (31)
#define PMCR0_CLKEN_ENABLE_MASK  (UINT64_C(1) << PMCR0_CLKEN_SHIFT)
#define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK)

/* 32 - 44 mirror the low bits for PMCs 8 and 9 */

/* PMCR1 enables counters in different processor modes */

#define PMCR1_EL0_A32_OFFSET (0)
#define PMCR1_EL0_A64_OFFSET (8)
#define PMCR1_EL1_A64_OFFSET (16)
#define PMCR1_EL3_A64_OFFSET (24)

#define PMCR1_EL0_A32_SHIFT(PMC) (PMCR1_EL0_A32_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL0_A64_SHIFT(PMC) (PMCR1_EL0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL1_A64_SHIFT(PMC) (PMCR1_EL1_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL3_A64_SHIFT(PMC) (PMCR1_EL3_A64_OFFSET + PMCR_PMC_SHIFT(PMC))

#define PMCR1_EL0_A32_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A32_SHIFT(PMC))
#define PMCR1_EL0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A64_SHIFT(PMC))
#define PMCR1_EL1_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL1_A64_SHIFT(PMC))
/* PMCR1_EL3_A64 is not supported on PMCs 8 and 9 */
#if NO_MONITOR
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) UINT64_C(0)
#else
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL3_A64_SHIFT(PMC))
#endif

#define PMCR1_EL_ALL_ENABLE_MASK(PMC) (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
	                               PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
	                               PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
	                               PMCR1_EL3_A64_ENABLE_MASK(PMC))
#define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))

/* PMESR0 and PMESR1 are event selection registers */

/* PMESR0 selects which event is counted on PMCs 2, 3, 4, and 5 */
/* PMESR1 selects which event is counted on PMCs 6, 7, 8, and 9 */

#define PMESR_PMC_WIDTH           (8)
#define PMESR_PMC_MASK            (UINT8_MAX)
#define PMESR_SHIFT(PMC, OFF)     (8 * ((PMC) - (OFF)))
#define PMESR_EVT_MASK(PMC, OFF)  (PMESR_PMC_MASK << PMESR_SHIFT(PMC, OFF))
#define PMESR_EVT_CLEAR(PMC, OFF) (~PMESR_EVT_MASK(PMC, OFF))

#define PMESR_EVT_DECODE(PMESR, PMC, OFF) \
	(((PMESR) >> PMESR_SHIFT(PMC, OFF)) & PMESR_PMC_MASK)
#define PMESR_EVT_ENCODE(EVT, PMC, OFF) \
	(((EVT) & PMESR_PMC_MASK) << PMESR_SHIFT(PMC, OFF))
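
/*
 * For example, selecting event 0x02 on PMC 3 (PMESR0, so OFF == 2) places it
 * at bits 8-15: PMESR_EVT_ENCODE(0x02, 3, 2) == 0x200.
 */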

/*
 * The low 8 bits of a configuration word select the event to program on
 * PMESR{0,1}. Bits 16-19 are mapped to PMCR1 bits.
 */
#define CFGWORD_EL0A32EN_MASK (0x10000)
#define CFGWORD_EL0A64EN_MASK (0x20000)
#define CFGWORD_EL1EN_MASK    (0x40000)
#define CFGWORD_EL3EN_MASK    (0x80000)
#define CFGWORD_ALLMODES_MASK (0xf0000)
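
/*
 * As an example, a configuration word of 0x30002 selects event 0x02 counted
 * only in user mode (EL0, both AArch32 and AArch64).
 */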

/* ACC offsets for PIO */
#define ACC_CPMU_PMC0_OFFSET (0x200)
#define ACC_CPMU_PMC8_OFFSET (0x280)

/*
 * Macros for reading and writing system registers.
 *
 * SR must be a quoted system register name, e.g. "PMCR0_EL1".
 */
#define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
#define SREG_READ(SR)     ({ uint64_t VAL; \
	                     __asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
	                     VAL; })
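
/*
 * Usage sketch: SREG_READ("PMCR0_EL1") expands to an mrs instruction with the
 * register name pasted into the instruction text; SREG_WRITE additionally
 * issues an isb so the write takes effect before subsequent instructions.
 */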

/*
 * Configuration registers that can be controlled by RAWPMU:
 *
 * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
 * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
 */
#if HAS_EARLY_APPLE_CPMU
#define RAWPMU_CONFIG_COUNT 7
#else /* HAS_EARLY_APPLE_CPMU */
#define RAWPMU_CONFIG_COUNT 11
#endif /* !HAS_EARLY_APPLE_CPMU */

/* TODO: allocate dynamically */
static uint64_t saved_PMCR[MAX_CPUS][2];
static uint64_t saved_PMESR[MAX_CPUS][2];
static uint64_t saved_RAWPMU[MAX_CPUS][RAWPMU_CONFIG_COUNT];
static uint64_t saved_counter[MAX_CPUS][KPC_MAX_COUNTERS];
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_configured = 0;

#ifdef KPC_DEBUG
static void
dump_regs(void)
{
	kprintf("PMCR0 = 0x%" PRIx64 "\n", SREG_READ("PMCR0_EL1"));
	kprintf("PMCR1 = 0x%" PRIx64 "\n", SREG_READ("PMCR1_EL1"));
	kprintf("PMCR2 = 0x%" PRIx64 "\n", SREG_READ("PMCR2_EL1"));
	kprintf("PMCR3 = 0x%" PRIx64 "\n", SREG_READ("PMCR3_EL1"));
	kprintf("PMCR4 = 0x%" PRIx64 "\n", SREG_READ("PMCR4_EL1"));
	kprintf("PMESR0 = 0x%" PRIx64 "\n", SREG_READ("PMESR0_EL1"));
	kprintf("PMESR1 = 0x%" PRIx64 "\n", SREG_READ("PMESR1_EL1"));

	kprintf("PMC0 = 0x%" PRIx64 "\n", SREG_READ("PMC0"));
	kprintf("PMC1 = 0x%" PRIx64 "\n", SREG_READ("PMC1"));
	kprintf("PMC2 = 0x%" PRIx64 "\n", SREG_READ("PMC2"));
	kprintf("PMC3 = 0x%" PRIx64 "\n", SREG_READ("PMC3"));
	kprintf("PMC4 = 0x%" PRIx64 "\n", SREG_READ("PMC4"));
	kprintf("PMC5 = 0x%" PRIx64 "\n", SREG_READ("PMC5"));
	kprintf("PMC6 = 0x%" PRIx64 "\n", SREG_READ("PMC6"));
	kprintf("PMC7 = 0x%" PRIx64 "\n", SREG_READ("PMC7"));

#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	kprintf("PMC8 = 0x%" PRIx64 "\n", SREG_READ("PMC8"));
	kprintf("PMC9 = 0x%" PRIx64 "\n", SREG_READ("PMC9"));
#endif
}
#endif

static boolean_t
enable_counter(uint32_t counter)
{
	uint64_t pmcr0 = 0;
	boolean_t counter_running, pmi_enabled, enabled;

	pmcr0 = SREG_READ("PMCR0_EL1") | 0x3 /* leave the fixed counters enabled for monotonic */;

	counter_running = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
	pmi_enabled = (pmcr0 & PMCR0_PMI_ENABLE_MASK(counter)) != 0;

	enabled = counter_running && pmi_enabled;

	if (!enabled) {
		pmcr0 |= PMCR0_PMC_ENABLE_MASK(counter);
		pmcr0 |= PMCR0_PMI_ENABLE_MASK(counter);
		SREG_WRITE("PMCR0_EL1", pmcr0);
	}

	return enabled;
}

static boolean_t
disable_counter(uint32_t counter)
{
	uint64_t pmcr0;
	boolean_t enabled;

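	/* PMCs 0 and 1 are the fixed counters, left to monotonic rather than kpc. */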
	if (counter < 2) {
		return true;
	}

	pmcr0 = SREG_READ("PMCR0_EL1") | 0x3 /* keep the fixed counters enabled for monotonic */;
	enabled = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;

	if (enabled) {
		pmcr0 &= PMCR0_PMC_DISABLE_MASK(counter);
		SREG_WRITE("PMCR0_EL1", pmcr0);
	}

	return enabled;
}

/*
 * Enable counter in processor modes determined by configuration word.
 */
static void
set_modes(uint32_t counter, kpc_config_t cfgword)
{
	uint64_t bits = 0;
	int cpuid = cpu_number();

	if (cfgword & CFGWORD_EL0A32EN_MASK) {
		bits |= PMCR1_EL0_A32_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL0A64EN_MASK) {
		bits |= PMCR1_EL0_A64_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL1EN_MASK) {
		bits |= PMCR1_EL1_A64_ENABLE_MASK(counter);
	}
#if !NO_MONITOR
	if (cfgword & CFGWORD_EL3EN_MASK) {
		bits |= PMCR1_EL3_A64_ENABLE_MASK(counter);
	}
#endif

	/*
	 * Backwards compatibility: Writing a non-zero configuration word with
	 * all zeros in bits 16-19 is interpreted as enabling in all modes.
	 * This matches the behavior when the PMCR1 bits weren't exposed.
	 */
	if (bits == 0 && cfgword != 0) {
		bits = PMCR1_EL_ALL_ENABLE_MASK(counter);
	}

	uint64_t pmcr1 = SREG_READ("PMCR1_EL1");
	pmcr1 &= PMCR1_EL_ALL_DISABLE_MASK(counter);
	pmcr1 |= bits;
	pmcr1 |= 0x30303; /* monotonic compatibility: keep fixed PMCs 0 and 1 counting in EL0/EL1 */
	SREG_WRITE("PMCR1_EL1", pmcr1);
	saved_PMCR[cpuid][1] = pmcr1;
}

static uint64_t
read_counter(uint32_t counter)
{
	switch (counter) {
	// case 0: return SREG_READ("PMC0");
	// case 1: return SREG_READ("PMC1");
	case 2: return SREG_READ("PMC2");
	case 3: return SREG_READ("PMC3");
	case 4: return SREG_READ("PMC4");
	case 5: return SREG_READ("PMC5");
	case 6: return SREG_READ("PMC6");
	case 7: return SREG_READ("PMC7");
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: return SREG_READ("PMC8");
	case 9: return SREG_READ("PMC9");
#endif
	default: return 0;
	}
}

static void
write_counter(uint32_t counter, uint64_t value)
{
	switch (counter) {
	// case 0: SREG_WRITE("PMC0", value); break;
	// case 1: SREG_WRITE("PMC1", value); break;
	case 2: SREG_WRITE("PMC2", value); break;
	case 3: SREG_WRITE("PMC3", value); break;
	case 4: SREG_WRITE("PMC4", value); break;
	case 5: SREG_WRITE("PMC5", value); break;
	case 6: SREG_WRITE("PMC6", value); break;
	case 7: SREG_WRITE("PMC7", value); break;
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: SREG_WRITE("PMC8", value); break;
	case 9: SREG_WRITE("PMC9", value); break;
#endif
	default: break;
	}
}

uint32_t
kpc_rawpmu_config_count(void)
{
	return RAWPMU_CONFIG_COUNT;
}

int
kpc_get_rawpmu_config(kpc_config_t *configv)
{
	configv[0] = SREG_READ("PMCR2_EL1");
	configv[1] = SREG_READ("PMCR3_EL1");
	configv[2] = SREG_READ("PMCR4_EL1");
	configv[3] = SREG_READ("OPMAT0_EL1");
	configv[4] = SREG_READ("OPMAT1_EL1");
	configv[5] = SREG_READ("OPMSK0_EL1");
	configv[6] = SREG_READ("OPMSK1_EL1");
#if RAWPMU_CONFIG_COUNT > 7
	configv[7] = SREG_READ("PMMMAP_EL1");
	configv[8] = SREG_READ("PMTRHLD2_EL1");
	configv[9] = SREG_READ("PMTRHLD4_EL1");
	configv[10] = SREG_READ("PMTRHLD6_EL1");
#endif
	return 0;
}

static int
kpc_set_rawpmu_config(kpc_config_t *configv)
{
	SREG_WRITE("PMCR2_EL1", configv[0]);
	SREG_WRITE("PMCR3_EL1", configv[1]);
	SREG_WRITE("PMCR4_EL1", configv[2]);
	SREG_WRITE("OPMAT0_EL1", configv[3]);
	SREG_WRITE("OPMAT1_EL1", configv[4]);
	SREG_WRITE("OPMSK0_EL1", configv[5]);
	SREG_WRITE("OPMSK1_EL1", configv[6]);
#if RAWPMU_CONFIG_COUNT > 7
	SREG_WRITE("PMMMAP_EL1", configv[7]);
	SREG_WRITE("PMTRHLD2_EL1", configv[8]);
	SREG_WRITE("PMTRHLD4_EL1", configv[9]);
	SREG_WRITE("PMTRHLD6_EL1", configv[10]);
#endif
	return 0;
}

static void
save_regs(void)
{
	int cpuid = cpu_number();

	__asm__ volatile ("dmb ish");

	assert(ml_get_interrupts_enabled() == FALSE);

	/* Save event selections. */
	saved_PMESR[cpuid][0] = SREG_READ("PMESR0_EL1");
	saved_PMESR[cpuid][1] = SREG_READ("PMESR1_EL1");

	kpc_get_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Disable the counters. */
	// SREG_WRITE("PMCR0_EL1", clear);

	/* Finally, save state for each counter. */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		saved_counter[cpuid][i] = read_counter(i);
	}
}

static void
restore_regs(void)
{
	int cpuid = cpu_number();

	/* Restore PMESR values. */
	SREG_WRITE("PMESR0_EL1", saved_PMESR[cpuid][0]);
	SREG_WRITE("PMESR1_EL1", saved_PMESR[cpuid][1]);

	kpc_set_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Restore counter values */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		write_counter(i, saved_counter[cpuid][i]);
	}

	/* Restore PMCR0/1 values (with PMCR0 last to enable). */
	SREG_WRITE("PMCR1_EL1", saved_PMCR[cpuid][1] | 0x30303);
}

static uint64_t
get_counter_config(uint32_t counter)
{
	uint64_t pmesr;

	switch (counter) {
	case 2:         /* FALLTHROUGH */
	case 3:         /* FALLTHROUGH */
	case 4:         /* FALLTHROUGH */
	case 5:
		pmesr = PMESR_EVT_DECODE(SREG_READ("PMESR0_EL1"), counter, 2);
		break;
	case 6:         /* FALLTHROUGH */
	case 7:
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	/* FALLTHROUGH */
	case 8:         /* FALLTHROUGH */
	case 9:
#endif
		pmesr = PMESR_EVT_DECODE(SREG_READ("PMESR1_EL1"), counter, 6);
		break;
	default:
		pmesr = 0;
		break;
	}

	kpc_config_t config = pmesr;

	uint64_t pmcr1 = SREG_READ("PMCR1_EL1");

	if (pmcr1 & PMCR1_EL0_A32_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A32EN_MASK;
	}
	if (pmcr1 & PMCR1_EL0_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A64EN_MASK;
	}
	if (pmcr1 & PMCR1_EL1_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL1EN_MASK;
#if NO_MONITOR
		config |= CFGWORD_EL3EN_MASK;
#endif
	}
#if !NO_MONITOR
	if (pmcr1 & PMCR1_EL3_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL3EN_MASK;
	}
#endif

	return config;
}

static void
set_counter_config(uint32_t counter, uint64_t config)
{
	int cpuid = cpu_number();
	uint64_t pmesr = 0;

	switch (counter) {
	case 2:         /* FALLTHROUGH */
	case 3:         /* FALLTHROUGH */
	case 4:         /* FALLTHROUGH */
	case 5:
		pmesr = SREG_READ("PMESR0_EL1");
		pmesr &= PMESR_EVT_CLEAR(counter, 2);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 2);
		SREG_WRITE("PMESR0_EL1", pmesr);
		saved_PMESR[cpuid][0] = pmesr;
		break;

	case 6:         /* FALLTHROUGH */
	case 7:
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	/* FALLTHROUGH */
	case 8:         /* FALLTHROUGH */
	case 9:
#endif
		pmesr = SREG_READ("PMESR1_EL1");
		pmesr &= PMESR_EVT_CLEAR(counter, 6);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 6);
		SREG_WRITE("PMESR1_EL1", pmesr);
		saved_PMESR[cpuid][1] = pmesr;
		break;
	default:
		break;
	}

	set_modes(counter, config);
}

/* internal functions */

void
kpc_arch_init(void)
{
}

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

uint32_t
kpc_fixed_count(void)
{
	return KPC_ARM64_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
	return KPC_ARM64_CONFIGURABLE_COUNT;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}

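/*
 * Cross-call pattern used below: cpu_broadcast_xcall() runs the handler on
 * every CPU; each handler decrements the sync counter, and the last one to
 * finish wakes any thread waiting on it.
 */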
static uint32_t kpc_xcall_sync;
static void
kpc_set_running_xcall(void *vstate)
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);

	if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}

static uint32_t kpc_xread_sync;
static void
kpc_get_curcpu_counters_xcall(void *args)
{
	struct kpc_get_counters_remote *handler = args;

	assert(handler != NULL);
	assert(handler->buf != NULL);

	int offset = cpu_number() * handler->buf_stride;
	int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	os_atomic_add(&(handler->nb_counters), r, relaxed);

	if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xread_sync);
	}
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	assert(buf != NULL);

	int enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	struct kpc_get_counters_remote hdl = {
		.classes = classes,
		.nb_counters = 0,
		.buf = buf,
		.buf_stride = kpc_get_counter_count(classes)
	};

	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
	int offset = hdl.nb_counters;

	(void)ml_set_interrupts_enabled(enabled);

	return offset;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if MONOTONIC
	mt_fixed_counts(counterv);
	return 0;
#else /* MONOTONIC */
#pragma unused(counterv)
	return ENOTSUP;
#endif /* !MONOTONIC */
}

int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	uint64_t ctr = 0ULL;

	assert(counterv);

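	/*
	 * Reconstruct the logical count from the shadow and reload values: with
	 * reload value R, hardware count C, and counter maximum M, the logical
	 * count is SHADOW + (C - R) normally, and
	 * SHADOW + (M - R + 1) + (C & KPC_ARM64_COUNTER_MASK) once the overflow
	 * bit is set, since the counter wrapped through zero.
	 */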
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		ctr = read_counter(i + offset);

		if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
			    (ctr & KPC_ARM64_COUNTER_MASK);
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = get_counter_config(i + offset);
		}
	}
	return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	assert(configv);

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(i + offset));

		set_counter_config(i + offset, *configv++);
	}

	ml_set_interrupts_enabled(enabled);

	return 0;
}

static uint32_t kpc_config_sync;
static void
kpc_set_config_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(new_config, mp_config->pmc_mask);
		new_config += kpc_popcount(mp_config->pmc_mask);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		kpc_set_rawpmu_config(new_config);
		new_config += RAWPMU_CONFIG_COUNT;
	}

	if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_config_sync);
	}
}

static uint64_t
kpc_reload_counter(uint32_t ctr)
{
	assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));

	uint64_t old = read_counter(ctr);

	if (kpc_controls_counter(ctr)) {
		write_counter(ctr, FIXED_RELOAD(ctr));
		return old & KPC_ARM64_COUNTER_MASK;
	} else {
		/*
		 * Unset the overflow bit to clear the condition that drives
		 * PMIs.  The power manager is not interested in handling PMIs.
		 */
		write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
		return 0;
	}
}

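/*
 * Period arithmetic, for illustration: a requested period P is stored as a
 * reload value of (max - P), so a counter reloaded to that value overflows
 * and raises a PMI after roughly P increments.
 */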
static uint32_t kpc_reload_sync;
static void
kpc_set_reload_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters; this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer, since the PMCs are muxed according to the PMC
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore counters not selected by the PMC mask */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}
			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_reload_sync);
	}
}

void
kpc_pmi_handler(unsigned int ctr)
{
	uint64_t extra = kpc_reload_counter(ctr);

	FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

	if (FIXED_ACTIONID(ctr)) {
		uintptr_t pc = 0;
		bool kernel = true;
		struct arm_saved_state *state;
		state = getCpuDatap()->cpu_int_state;
		if (state) {
			kernel = !PSR64_IS_USER(get_saved_state_cpsr(state));
			pc = get_saved_state_pc(state);
			if (kernel) {
				pc = VM_KERNEL_UNSLIDE(pc);
			}
		}

		uint64_t config = get_counter_config(ctr);
		kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
		bool custom_mode = false;
		if ((config & CFGWORD_EL0A32EN_MASK) || (config & CFGWORD_EL0A64EN_MASK)) {
			flags |= KPC_USER_COUNTING;
			custom_mode = true;
		}
		if ((config & CFGWORD_EL1EN_MASK)) {
			flags |= KPC_KERNEL_COUNTING;
			custom_mode = true;
		}
		/*
		 * For backwards compatibility, treat an empty mode mask as
		 * counting in both user and kernel mode.
		 */
		if (!custom_mode) {
			flags |= KPC_USER_COUNTING | KPC_KERNEL_COUNTING;
		}
		kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, config & 0xff, FIXED_SHADOW(ctr),
		    pc, flags);
	}
}

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_RAWPMU_MASK;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config != NULL);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;
	kpc_configured = 1;

	return 0;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	assert(mp_config);
	assert(mp_config->configv);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

void
kpc_idle(void)
{
	if (kpc_configured) {
		save_regs();
	}
}

void
kpc_idle_exit(void)
{
	if (kpc_configured) {
		restore_regs();
	}
}

int
kpc_set_sw_inc(uint32_t mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ARM_APPLE;
}

#else /* APPLE_ARM64_ARCH_FAMILY */

/* We don't currently support non-Apple arm64 PMU configurations like PMUv3 */

void
kpc_arch_init(void)
{
	/* No-op */
}

uint32_t
kpc_get_classes(void)
{
	return 0;
}

uint32_t
kpc_fixed_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_count(void)
{
	return 0;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)
{
	return 0;
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return 0;
}

uint64_t
kpc_configurable_max(void)
{
	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_fixed_counters(uint64_t *counterv __unused)
{
	return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
	return FALSE;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)
{
	return FALSE;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

void
kpc_idle(void)
{
	// do nothing
}

void
kpc_idle_exit(void)
{
	// do nothing
}

int
kpc_get_all_cpus_counters(uint32_t classes __unused, int *curcpu __unused, uint64_t *buf __unused)
{
	return 0;
}

int
kpc_set_sw_inc(uint32_t mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ERROR;
}

uint32_t
kpc_rawpmu_config_count(void)
{
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}

#endif /* !APPLE_ARM64_ARCH_FAMILY */