/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <kperf/buffer.h>
#include <kern/thread.h>
#if defined(__arm64__) || defined(__arm__)
#include <arm/cpu_data_internal.h>
#endif

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE (machine_info.logical_cpu_max * \
	                 COUNTERBUF_SIZE_PER_CPU)
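
/*
 * Illustrative sizing only (the exact KPC_MAX_COUNTERS value is assumed
 * here, not taken from this file): if KPC_MAX_COUNTERS were 32 and a
 * machine had logical_cpu_max == 8, each per-CPU buffer would be
 * 32 * sizeof(uint64_t) = 256 bytes, and the all-CPUs buffer returned by
 * kpc_counterbuf_alloc() would be 8 * 256 = 2 KiB.
 */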

/* locks */
static LCK_GRP_DECLARE(kpc_config_lckgrp, "kpc");
static LCK_MTX_DECLARE(kpc_config_lock, &kpc_config_lckgrp);

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;
#if MACH_ASSERT
static bool kpc_calling_pm = false;
#endif /* MACH_ASSERT */

boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;

static uint64_t *
kpc_percpu_alloc(void)
{
	return kheap_alloc_tag(KHEAP_DATA_BUFFERS, COUNTERBUF_SIZE_PER_CPU,
	           Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG);
}

static void
kpc_percpu_free(uint64_t *buf)
{
	if (buf) {
		kheap_free(KHEAP_DATA_BUFFERS, buf, COUNTERBUF_SIZE_PER_CPU);
	}
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);
	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs. This mimics the userspace API.
	 * That layout is a poor fit for the per-CPU kpc buffers, since:
	 *      1. Buffers don't need to be this large.
	 *      2. The actual number of CPUs is not known at this point.
	 *
	 * Since CPUs call out into kpc as they are registered, we allocate
	 * the per-CPU memory here instead.
	 */

	if ((cpu_data->cpu_kpc_buf[0] = kpc_percpu_alloc()) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_buf[1] = kpc_percpu_alloc()) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_shadow = kpc_percpu_alloc()) == NULL) {
		goto error;
	}
	if ((cpu_data->cpu_kpc_reload = kpc_percpu_alloc()) == NULL) {
		goto error;
	}

	/* success */
	return TRUE;

error:
	kpc_unregister_cpu(cpu_data);
	return FALSE;
}

void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	if (cpu_data->cpu_kpc_buf[0] != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_buf[0]);
		cpu_data->cpu_kpc_buf[0] = NULL;
	}
	if (cpu_data->cpu_kpc_buf[1] != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_buf[1]);
		cpu_data->cpu_kpc_buf[1] = NULL;
	}
	if (cpu_data->cpu_kpc_shadow != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_shadow);
		cpu_data->cpu_kpc_shadow = NULL;
	}
	if (cpu_data->cpu_kpc_reload != NULL) {
		kpc_percpu_free(cpu_data->cpu_kpc_reload);
		cpu_data->cpu_kpc_reload = NULL;
	}
}


static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state) {
		task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
	} else {
		task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
	}
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) {
		return EACCES;
	}

	/* nothing to do if the state is not changing */
	if (old_state == new_state) {
		return 0;
	}

	/* notify the power manager */
	if (kpc_pm_handler) {
#if MACH_ASSERT
		kpc_calling_pm = true;
#endif /* MACH_ASSERT */
		kpc_pm_handler(new_state ? FALSE : TRUE);
#if MACH_ASSERT
		kpc_calling_pm = false;
#endif /* MACH_ASSERT */
	}

	/*
	 * This is a force -- ensure that counters are forced, even if power
	 * management fails to acknowledge it.
	 */
	if (force_all_ctrs != new_state) {
		force_all_ctrs = new_state;
	}

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	return 0;
}

void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
	/*
	 * Force-all-counters should still be true when the counters are being
	 * made available to power management and false when counters are going
	 * to be taken away.
	 */
	assert(force_all_ctrs == available_to_pm);
	/*
	 * Make sure power management isn't playing games with us.
	 */
	assert(kpc_calling_pm == true);

	/*
	 * Counters being available means no one is forcing all counters.
	 */
	force_all_ctrs = available_to_pm ? FALSE : TRUE;
}

int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count()) {
		return kpc_controls_fixed_counters();
	}

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) {
		return FALSE;
	}

	return TRUE;
}

uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed()) {
		cur_state |= KPC_CLASS_FIXED_MASK;
	}

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask)) {
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) {
		cur_state |= KPC_CLASS_POWER_MASK;
	}

	return cur_state;
}

/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU's counters, reading counters from all
	 * CPUs is architecture dependent. This allows kpc to make the most of
	 * the platform if memory-mapped counter registers are supported.
	 */
	if (all_cpus) {
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	} else {
		return kpc_get_curcpu_counters(classes, curcpu, buf);
	}
}
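
/*
 * A minimal in-kernel usage sketch (illustrative only, not from the
 * original source; error handling elided): read the fixed counters from
 * every CPU.
 *
 *	uint64_t *buf = kpc_counterbuf_alloc();
 *	if (buf != NULL) {
 *		int nvalues = kpc_get_cpu_counters(TRUE,
 *		    KPC_CLASS_FIXED_MASK, NULL, buf);
 *		... consume nvalues entries from buf ...
 *		kpc_counterbuf_free(buf);
 *	}
 */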

int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
	int curcpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = cpu_number();
	if (curcpu) {
		*curcpu = curcpu_id;
	}

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id)) {
			continue;
		}

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK) {
		count += kpc_fixed_count();
	}

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK) {
		count += kpc_fixed_config_count();
	}

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients()) {
		count += kpc_rawpmu_config_count();
	}

	return count;
}

int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		// Clients shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if (kpc_multiple_clients()) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	ret = kpc_set_config_arch(&mp_config);

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}

uint32_t
kpc_get_counterbuf_size(void)
{
	return COUNTERBUF_SIZE;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	return kheap_alloc_tag(KHEAP_DATA_BUFFERS, COUNTERBUF_SIZE,
	           Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG);
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kheap_free(KHEAP_DATA_BUFFERS, buf, COUNTERBUF_SIZE);
	}
}

void
kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config,
    uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags)
{
	struct kperf_sample sbuf;

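	/*
	 * Pack the counter's configuration, number, and flags into a single
	 * kdebug argument: the counter number is shifted into bit 32 and the
	 * flags into bit 48, both ORed over the configuration word.
	 */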
	uint64_t desc = config | (uint64_t)counter << 32 | (uint64_t)flags << 48;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START, desc, count, pc);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_PMI,
		.trigger_id = 0,
	};

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}


int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
#else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	kprintf("kpc: setting period for classes 0x%x\n", classes);
	kpc_set_period_arch(&mp_config);

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i) {
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/*
	 * NOTE: what happens if a PMI occurs while actionids are being set
	 * is undefined.
	 */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				CONFIGURABLE_ACTIONID(i) = *val++;
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			if ((1ULL << i) & pmc_mask) {
				*val++ = CONFIGURABLE_ACTIONID(i);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK) {
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
	}

	/* generate the state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}
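
/*
 * Illustrative usage sketch (not from the original source): a client that
 * has already set configs, periods, and actionids would start the fixed
 * and configurable classes with something like:
 *
 *	(void)kpc_set_running(KPC_CLASS_FIXED_MASK |
 *	    KPC_CLASS_CONFIGURABLE_MASK);
 */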

boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
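	/*
	 * Note: 0x38 (0b111000) reserves configurable PMCs 3-5 for the power
	 * manager, with a custom configuration (custom_config=TRUE).
	 */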
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
    boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager state */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	    req_mask, custom_config);

	/* post-condition */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}

void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
	return (uint8_t)__builtin_popcountll(value);
}

uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* no configurable classes requested, or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE) {
			cfg_mask |= all_cfg_pmcs_mask;
		} else {
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
		}
	}

	/*
	 * The power class exists iff:
	 *      - no task has forced all counters, and
	 *      - the PM has registered without a custom configuration, so it
	 *        uses kpc to interact with its PMCs.
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0);
	assert(kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL);

	return cfg_mask | pwr_mask;
}
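
/*
 * Illustrative example only (counter counts assumed, not authoritative):
 * with six configurable PMCs (all_cfg_pmcs_mask == 0x3f) and a PM
 * reservation of kpc_pm_pmc_mask == 0x38 without a custom configuration:
 *
 *	kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK) == 0x07
 *	kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK)        == 0x38
 *
 * If a task then forces all counters, the CONFIGURABLE class covers all
 * six PMCs (0x3f) and the POWER class becomes empty.
 */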