duct-tape/xnu/osfmk/kern/host.c
   1  /*
   2   * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
   3   *
   4   * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
   5   *
   6   * This file contains Original Code and/or Modifications of Original Code
   7   * as defined in and that are subject to the Apple Public Source License
   8   * Version 2.0 (the 'License'). You may not use this file except in
   9   * compliance with the License. The rights granted to you under the License
  10   * may not be used to create, or enable the creation or redistribution of,
  11   * unlawful or unlicensed copies of an Apple operating system, or to
  12   * circumvent, violate, or enable the circumvention or violation of, any
  13   * terms of an Apple operating system software license agreement.
  14   *
  15   * Please obtain a copy of the License at
  16   * http://www.opensource.apple.com/apsl/ and read it before using this file.
  17   *
  18   * The Original Code and all software distributed under the License are
  19   * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  20   * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  21   * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
  22   * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  23   * Please see the License for the specific language governing rights and
  24   * limitations under the License.
  25   *
  26   * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  27   */
  28  /*
  29   * @OSF_COPYRIGHT@
  30   */
  31  /*
  32   * Mach Operating System
  33   * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
  34   * All Rights Reserved.
  35   *
  36   * Permission to use, copy, modify and distribute this software and its
  37   * documentation is hereby granted, provided that both the copyright
  38   * notice and this permission notice appear in all copies of the
  39   * software, derivative works or modified versions, and any portions
  40   * thereof, and that both notices appear in supporting documentation.
  41   *
  42   * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  43   * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  44   * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  45   *
  46   * Carnegie Mellon requests users of this software to return to
  47   *
  48   *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  49   *  School of Computer Science
  50   *  Carnegie Mellon University
  51   *  Pittsburgh PA 15213-3890
  52   *
  53   * any improvements or extensions that they make and grant Carnegie Mellon
  54   * the rights to redistribute these changes.
  55   */
  56  /*
  57   */
  58  
  59  /*
  60   *	host.c
  61   *
  62   *	Non-ipc host functions.
  63   */
  64  
  65  #include <mach/mach_types.h>
  66  #include <mach/boolean.h>
  67  #include <mach/host_info.h>
  68  #include <mach/host_special_ports.h>
  69  #include <mach/kern_return.h>
  70  #include <mach/machine.h>
  71  #include <mach/port.h>
  72  #include <mach/processor_info.h>
  73  #include <mach/vm_param.h>
  74  #include <mach/processor.h>
  75  #include <mach/mach_host_server.h>
  76  #include <mach/host_priv_server.h>
  77  #include <mach/vm_map.h>
  78  #include <mach/task_info.h>
  79  
  80  #include <machine/commpage.h>
  81  #include <machine/cpu_capabilities.h>
  82  
  83  #include <kern/kern_types.h>
  84  #include <kern/assert.h>
  85  #include <kern/kalloc.h>
  86  #include <kern/host.h>
  87  #include <kern/host_statistics.h>
  88  #include <kern/ipc_host.h>
  89  #include <kern/misc_protos.h>
  90  #include <kern/sched.h>
  91  #include <kern/processor.h>
  92  #include <kern/mach_node.h>     // mach_node_port_changed()
  93  
  94  #include <vm/vm_map.h>
  95  #include <vm/vm_purgeable_internal.h>
  96  #include <vm/vm_pageout.h>
  97  
  98  #include <IOKit/IOBSD.h> // IOTaskHasEntitlement
  99  #include <IOKit/IOKitKeys.h> // DriverKit entitlement strings
 100  
 101  
 102  #if CONFIG_ATM
 103  #include <atm/atm_internal.h>
 104  #endif
 105  
 106  #if CONFIG_MACF
 107  #include <security/mac_mach_internal.h>
 108  #endif
 109  
 110  #include <pexpert/pexpert.h>
 111  
 112  SCALABLE_COUNTER_DEFINE(vm_statistics_zero_fill_count);        /* # of zero fill pages */
 113  SCALABLE_COUNTER_DEFINE(vm_statistics_reactivations);          /* # of pages reactivated */
 114  SCALABLE_COUNTER_DEFINE(vm_statistics_pageins);                /* # of pageins */
 115  SCALABLE_COUNTER_DEFINE(vm_statistics_pageouts);               /* # of pageouts */
 116  SCALABLE_COUNTER_DEFINE(vm_statistics_faults);                 /* # of faults */
 117  SCALABLE_COUNTER_DEFINE(vm_statistics_cow_faults);             /* # of copy-on-writes */
 118  SCALABLE_COUNTER_DEFINE(vm_statistics_lookups);                /* object cache lookups */
 119  SCALABLE_COUNTER_DEFINE(vm_statistics_hits);                   /* object cache hits */
 120  SCALABLE_COUNTER_DEFINE(vm_statistics_purges);                 /* # of pages purged */
 121  SCALABLE_COUNTER_DEFINE(vm_statistics_decompressions);         /* # of pages decompressed */
 122  SCALABLE_COUNTER_DEFINE(vm_statistics_compressions);           /* # of pages compressed */
 123  SCALABLE_COUNTER_DEFINE(vm_statistics_swapins);                /* # of pages swapped in (via compression segments) */
 124  SCALABLE_COUNTER_DEFINE(vm_statistics_swapouts);               /* # of pages swapped out (via compression segments) */
 125  SCALABLE_COUNTER_DEFINE(vm_statistics_total_uncompressed_pages_in_compressor); /* # of pages (uncompressed) held within the compressor. */
 126  SCALABLE_COUNTER_DEFINE(vm_page_grab_count);
 127  
 128  host_data_t realhost;
 129  
 130  static void
 131  get_host_vm_stats(vm_statistics64_t out)
 132  {
 133  #ifdef __DARLING__
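      	/* Under Darling, host VM statistics are simply reported as all zeros. */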
 134  	memset(out, 0, sizeof(*out));
 135  #else
 136  	out->zero_fill_count = counter_load(&vm_statistics_zero_fill_count);
 137  	out->reactivations = counter_load(&vm_statistics_reactivations);
 138  	out->pageins = counter_load(&vm_statistics_pageins);
 139  	out->pageouts = counter_load(&vm_statistics_pageouts);
 140  	out->faults = counter_load(&vm_statistics_faults);
 141  	out->cow_faults = counter_load(&vm_statistics_cow_faults);
 142  	out->lookups = counter_load(&vm_statistics_lookups);
 143  	out->hits = counter_load(&vm_statistics_hits);
 144  	out->compressions = counter_load(&vm_statistics_compressions);
 145  	out->decompressions = counter_load(&vm_statistics_decompressions);
 146  	out->swapins = counter_load(&vm_statistics_swapins);
 147  	out->swapouts = counter_load(&vm_statistics_swapouts);
 148  #endif // __DARLING__
 149  }
 150  vm_extmod_statistics_data_t host_extmod_statistics;
 151  
 152  kern_return_t
 153  host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
 154  {
 155  	if (host_priv == HOST_PRIV_NULL) {
 156  		return KERN_INVALID_ARGUMENT;
 157  	}
 158  
 159  	unsigned int count = processor_count;
 160  	assert(count != 0);
 161  
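      	/* The array of send rights is returned as a processor_array_t below, so mach_port_t and processor_t must be the same size. */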
 162  	static_assert(sizeof(mach_port_t) == sizeof(processor_t));
 163  
 164  	mach_port_t* ports = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
 165  	if (!ports) {
 166  		return KERN_RESOURCE_SHORTAGE;
 167  	}
 168  
 169  	for (unsigned int i = 0; i < count; i++) {
 170  		processor_t processor = processor_array[i];
 171  		assert(processor != PROCESSOR_NULL);
 172  
 173  		/* do the conversion that Mig should handle */
 174  		ipc_port_t processor_port = convert_processor_to_port(processor);
 175  		ports[i] = processor_port;
 176  	}
 177  
 178  	*countp = count;
 179  	*out_array = (processor_array_t)ports;
 180  
 181  	return KERN_SUCCESS;
 182  }
 183  
 184  extern int sched_allow_NO_SMT_threads;
 185  
 186  #ifndef __DARLING__
 187  kern_return_t
 188  host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
 189  {
 190  	if (host == HOST_NULL) {
 191  		return KERN_INVALID_ARGUMENT;
 192  	}
 193  
 194  	switch (flavor) {
 195  	case HOST_BASIC_INFO: {
 196  		host_basic_info_t basic_info;
 197  		int master_id = master_processor->cpu_id;
 198  
 199  		/*
 200  		 *	Basic information about this host.
 201  		 */
 202  		if (*count < HOST_BASIC_INFO_OLD_COUNT) {
 203  			return KERN_FAILURE;
 204  		}
 205  
 206  		basic_info = (host_basic_info_t)info;
 207  
 208  		basic_info->memory_size = machine_info.memory_size;
 209  		basic_info->cpu_type = slot_type(master_id);
 210  		basic_info->cpu_subtype = slot_subtype(master_id);
 211  		basic_info->max_cpus = machine_info.max_cpus;
 212  #if defined(__x86_64__)
 213  		if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) {
 214  			basic_info->avail_cpus = primary_processor_avail_count_user;
 215  		} else {
 216  			basic_info->avail_cpus = processor_avail_count_user;
 217  		}
 218  #else
 219  		basic_info->avail_cpus = processor_avail_count;
 220  #endif
 221  
 222  
 223  		if (*count >= HOST_BASIC_INFO_COUNT) {
 224  			basic_info->cpu_threadtype = slot_threadtype(master_id);
 225  			basic_info->physical_cpu = machine_info.physical_cpu;
 226  			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
 227  #if defined(__x86_64__)
 228  			basic_info->logical_cpu = basic_info->avail_cpus;
 229  #else
 230  			basic_info->logical_cpu = machine_info.logical_cpu;
 231  #endif
 232  			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
 233  
 234  			basic_info->max_mem = machine_info.max_mem;
 235  
 236  			*count = HOST_BASIC_INFO_COUNT;
 237  		} else {
 238  			*count = HOST_BASIC_INFO_OLD_COUNT;
 239  		}
 240  
 241  		return KERN_SUCCESS;
 242  	}
 243  
 244  	case HOST_SCHED_INFO: {
 245  		host_sched_info_t sched_info;
 246  		uint32_t quantum_time;
 247  		uint64_t quantum_ns;
 248  
 249  		/*
 250  		 *	Return scheduler information.
 251  		 */
 252  		if (*count < HOST_SCHED_INFO_COUNT) {
 253  			return KERN_FAILURE;
 254  		}
 255  
 256  		sched_info = (host_sched_info_t)info;
 257  
 258  		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
 259  		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
 260  
 261  		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);
 262  
 263  		*count = HOST_SCHED_INFO_COUNT;
 264  
 265  		return KERN_SUCCESS;
 266  	}
 267  
 268  	case HOST_RESOURCE_SIZES: {
 269  		/*
 270  		 * Return sizes of kernel data structures
 271  		 */
 272  		if (*count < HOST_RESOURCE_SIZES_COUNT) {
 273  			return KERN_FAILURE;
 274  		}
 275  
 276  		/* XXX Fail until ledgers are implemented */
 277  		return KERN_INVALID_ARGUMENT;
 278  	}
 279  
 280  	case HOST_PRIORITY_INFO: {
 281  		host_priority_info_t priority_info;
 282  
 283  		if (*count < HOST_PRIORITY_INFO_COUNT) {
 284  			return KERN_FAILURE;
 285  		}
 286  
 287  		priority_info = (host_priority_info_t)info;
 288  
 289  		priority_info->kernel_priority = MINPRI_KERNEL;
 290  		priority_info->system_priority = MINPRI_KERNEL;
 291  		priority_info->server_priority = MINPRI_RESERVED;
 292  		priority_info->user_priority = BASEPRI_DEFAULT;
 293  		priority_info->depress_priority = DEPRESSPRI;
 294  		priority_info->idle_priority = IDLEPRI;
 295  		priority_info->minimum_priority = MINPRI_USER;
 296  		priority_info->maximum_priority = MAXPRI_RESERVED;
 297  
 298  		*count = HOST_PRIORITY_INFO_COUNT;
 299  
 300  		return KERN_SUCCESS;
 301  	}
 302  
 303  	/*
 304  	 * Gestalt for various trap facilities.
 305  	 */
 306  	case HOST_MACH_MSG_TRAP:
 307  	case HOST_SEMAPHORE_TRAPS: {
 308  		*count = 0;
 309  		return KERN_SUCCESS;
 310  	}
 311  
 312  	case HOST_CAN_HAS_DEBUGGER: {
 313  		host_can_has_debugger_info_t can_has_debugger_info;
 314  
 315  		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
 316  			return KERN_FAILURE;
 317  		}
 318  
 319  		can_has_debugger_info = (host_can_has_debugger_info_t)info;
 320  		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
 321  		*count = HOST_CAN_HAS_DEBUGGER_COUNT;
 322  
 323  		return KERN_SUCCESS;
 324  	}
 325  
 326  	case HOST_VM_PURGABLE: {
 327  		if (*count < HOST_VM_PURGABLE_COUNT) {
 328  			return KERN_FAILURE;
 329  		}
 330  
 331  		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);
 332  
 333  		*count = HOST_VM_PURGABLE_COUNT;
 334  		return KERN_SUCCESS;
 335  	}
 336  
 337  	case HOST_DEBUG_INFO_INTERNAL: {
 338  #if DEVELOPMENT || DEBUG
 339  		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
 340  			return KERN_FAILURE;
 341  		}
 342  
 343  		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
 344  		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
 345  		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;
 346  
 347  #if CONFIG_COALITIONS
 348  		debug_info->config_coalitions = 1;
 349  #endif
 350  		debug_info->config_bank = 1;
 351  #if CONFIG_ATM
 352  		debug_info->config_atm = 1;
 353  #endif
 354  #if CONFIG_CSR
 355  		debug_info->config_csr = 1;
 356  #endif
 357  		return KERN_SUCCESS;
 358  #else /* DEVELOPMENT || DEBUG */
 359  		return KERN_NOT_SUPPORTED;
 360  #endif
 361  	}
 362  
 363  	case HOST_PREFERRED_USER_ARCH: {
 364  		host_preferred_user_arch_t user_arch_info;
 365  
 366  		/*
 367  		 *	Basic information about this host.
 368  		 */
 369  		if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
 370  			return KERN_FAILURE;
 371  		}
 372  
 373  		user_arch_info = (host_preferred_user_arch_t)info;
 374  
 375  #if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
 376  		cpu_type_t preferred_cpu_type;
 377  		cpu_subtype_t preferred_cpu_subtype;
 378  		if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
 379  			preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
 380  		}
 381  		if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
 382  			preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
 383  		}
 384  		user_arch_info->cpu_type    = preferred_cpu_type;
 385  		user_arch_info->cpu_subtype = preferred_cpu_subtype;
 386  #else
 387  		int master_id               = master_processor->cpu_id;
 388  		user_arch_info->cpu_type    = slot_type(master_id);
 389  		user_arch_info->cpu_subtype = slot_subtype(master_id);
 390  #endif
 391  
 392  
 393  		*count = HOST_PREFERRED_USER_ARCH_COUNT;
 394  
 395  		return KERN_SUCCESS;
 396  	}
 397  
 398  	default: return KERN_INVALID_ARGUMENT;
 399  	}
 400  }
 401  #endif // __DARLING__
 402  
 403  kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);
 404  
 405  #ifndef __DARLING__
 406  kern_return_t
 407  host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
 408  {
 409  	if (host == HOST_NULL) {
 410  		return KERN_INVALID_HOST;
 411  	}
 412  
 413  	switch (flavor) {
 414  	case HOST_LOAD_INFO: {
 415  		host_load_info_t load_info;
 416  
 417  		if (*count < HOST_LOAD_INFO_COUNT) {
 418  			return KERN_FAILURE;
 419  		}
 420  
 421  		load_info = (host_load_info_t)info;
 422  
 423  		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
 424  		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);
 425  
 426  		*count = HOST_LOAD_INFO_COUNT;
 427  		return KERN_SUCCESS;
 428  	}
 429  
 430  	case HOST_VM_INFO: {
 431  		vm_statistics64_data_t host_vm_stat;
 432  		vm_statistics_t stat32;
 433  		mach_msg_type_number_t original_count;
 434  
 435  		if (*count < HOST_VM_INFO_REV0_COUNT) {
 436  			return KERN_FAILURE;
 437  		}
 438  
 439  		get_host_vm_stats(&host_vm_stat);
 440  
 441  		stat32 = (vm_statistics_t)info;
 442  
 443  		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
 444  		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);
 445  
 446  		if (vm_page_local_q) {
 447  			zpercpu_foreach(lq, vm_page_local_q) {
 448  				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
 449  			}
 450  		}
 451  		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
 452  #if !XNU_TARGET_OS_OSX
 453  		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
 454  #else /* !XNU_TARGET_OS_OSX */
 455  		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
 456  #endif /* !XNU_TARGET_OS_OSX */
 457  		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
 458  		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
 459  		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
 460  		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
 461  		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
 462  		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
 463  		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
 464  		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);
 465  
 466  		/*
 467  		 * Fill in extra info added in later revisions of the
 468  		 * vm_statistics data structure.  Fill in only what can fit
 469  		 * in the data structure the caller gave us !
 470  		 */
 471  		original_count = *count;
 472  		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
 473  		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
 474  			/* rev1 added "purgeable" info */
 475  			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
 476  			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
 477  			*count = HOST_VM_INFO_REV1_COUNT;
 478  		}
 479  
 480  		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
 481  			/* rev2 added "speculative" info */
 482  			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
 483  			*count = HOST_VM_INFO_REV2_COUNT;
 484  		}
 485  
 486  		/* rev3 changed some of the fields to be 64-bit */
 487  
 488  		return KERN_SUCCESS;
 489  	}
 490  
 491  	case HOST_CPU_LOAD_INFO: {
 492  		host_cpu_load_info_t cpu_load_info;
 493  
 494  		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
 495  			return KERN_FAILURE;
 496  		}
 497  
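      /* Accumulate a time value (in mach absolute time units) into cpu_ticks[], converted to clock ticks via hz_tick_interval. */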
 498  #define GET_TICKS_VALUE(state, ticks)                                                      \
 499  	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
 500  	MACRO_END
 501  #define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer)                            \
 502  	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
 503  	MACRO_END
 504  
 505  		cpu_load_info = (host_cpu_load_info_t)info;
 506  		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
 507  		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
 508  		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
 509  		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
 510  
 511  		simple_lock(&processor_list_lock, LCK_GRP_NULL);
 512  
 513  		unsigned int pcount = processor_count;
 514  
 515  		for (unsigned int i = 0; i < pcount; i++) {
 516  			processor_t processor = processor_array[i];
 517  			assert(processor != PROCESSOR_NULL);
 518  
 519  			timer_t idle_state;
 520  			uint64_t idle_time_snapshot1, idle_time_snapshot2;
 521  			uint64_t idle_time_tstamp1, idle_time_tstamp2;
 522  
 523  			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */
 524  
 525  			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
 526  			if (precise_user_kernel_time) {
 527  				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
 528  			} else {
 529  				/* system_state may represent either sys or user */
 530  				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
 531  			}
 532  
 533  			idle_state = &processor->idle_state;
 534  			idle_time_snapshot1 = timer_grab(idle_state);
 535  			idle_time_tstamp1 = idle_state->tstamp;
 536  
 537  			if (processor->current_state != idle_state) {
 538  				/* Processor is non-idle, so idle timer should be accurate */
 539  				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
 540  			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
 541  			    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
 542  				/* Idle timer is being updated concurrently, second stamp is good enough */
 543  				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
 544  			} else {
 545  				/*
 546  				 * Idle timer may be very stale. Fortunately we have established
 547  				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
 548  				 */
 549  				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;
 550  
 551  				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
 552  			}
 553  		}
 554  		simple_unlock(&processor_list_lock);
 555  
 556  		*count = HOST_CPU_LOAD_INFO_COUNT;
 557  
 558  		return KERN_SUCCESS;
 559  	}
 560  
 561  	case HOST_EXPIRED_TASK_INFO: {
 562  		if (*count < TASK_POWER_INFO_COUNT) {
 563  			return KERN_FAILURE;
 564  		}
 565  
 566  		task_power_info_t tinfo1 = (task_power_info_t)info;
 567  		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;
 568  
 569  		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
 570  		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;
 571  
 572  		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
 573  
 574  		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;
 575  
 576  		tinfo1->total_user = dead_task_statistics.total_user_time;
 577  		tinfo1->total_system = dead_task_statistics.total_system_time;
 578  		if (*count < TASK_POWER_INFO_V2_COUNT) {
 579  			*count = TASK_POWER_INFO_COUNT;
 580  		} else if (*count >= TASK_POWER_INFO_V2_COUNT) {
 581  			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
 582  #if defined(__arm__) || defined(__arm64__)
 583  			tinfo2->task_energy = dead_task_statistics.task_energy;
 584  			tinfo2->task_ptime = dead_task_statistics.total_ptime;
 585  			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
 586  #endif
 587  			*count = TASK_POWER_INFO_V2_COUNT;
 588  		}
 589  
 590  		return KERN_SUCCESS;
 591  	}
 592  	default: return KERN_INVALID_ARGUMENT;
 593  	}
 594  }
 595  #endif // __DARLING__
 596  
 597  extern uint32_t c_segment_pages_compressed;
 598  
 599  #define HOST_STATISTICS_TIME_WINDOW 1 /* seconds */
 600  #define HOST_STATISTICS_MAX_REQUESTS 10 /* maximum number of requests per window */
 601  #define HOST_STATISTICS_MIN_REQUESTS 2 /* minimum number of requests per window */
 602  
 603  uint64_t host_statistics_time_window;
 604  
 605  static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
 606  static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);
 607  
 608  #define HOST_VM_INFO64_REV0             0
 609  #define HOST_VM_INFO64_REV1             1
 610  #define HOST_EXTMOD_INFO64_REV0         2
 611  #define HOST_LOAD_INFO_REV0             3
 612  #define HOST_VM_INFO_REV0               4
 613  #define HOST_VM_INFO_REV1               5
 614  #define HOST_VM_INFO_REV2               6
 615  #define HOST_CPU_LOAD_INFO_REV0         7
 616  #define HOST_EXPIRED_TASK_INFO_REV0     8
 617  #define HOST_EXPIRED_TASK_INFO_REV1     9
 618  #define NUM_HOST_INFO_DATA_TYPES        10
 619  
 620  static vm_statistics64_data_t host_vm_info64_rev0 = {};
 621  static vm_statistics64_data_t host_vm_info64_rev1 = {};
 622  static vm_extmod_statistics_data_t host_extmod_info64 = {};
 623  static host_load_info_data_t host_load_info = {};
 624  static vm_statistics_data_t host_vm_info_rev0 = {};
 625  static vm_statistics_data_t host_vm_info_rev1 = {};
 626  static vm_statistics_data_t host_vm_info_rev2 = {};
 627  static host_cpu_load_info_data_t host_cpu_load_info = {};
 628  static task_power_info_data_t host_expired_task_info = {};
 629  static task_power_info_v2_data_t host_expired_task_info2 = {};
 630  
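      /*
       * Per-flavor cache of the most recent host_statistics()/host_statistics64() results.
       * Non-platform tasks that exceed their randomized request budget for the current
       * time window are served this cached copy instead of fresh statistics.
       */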
 631  struct host_stats_cache {
 632  	uint64_t last_access;
 633  	uint64_t current_requests;
 634  	uint64_t max_requests;
 635  	uintptr_t data;
 636  	mach_msg_type_number_t count; // NOTE: count is in units of sizeof(integer_t)
 637  };
 638  
 639  static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
 640  	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
 641  	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
 642  	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
 643  	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
 644  	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
 645  	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
 646  	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
 647  	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
 648  	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
 649  	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT},
 650  };
 651  
 652  
 653  void
 654  host_statistics_init(void)
 655  {
 656  	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
 657  #ifdef __DARLING__
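      	/* Darling initializes the mutex at runtime; presumably the static LCK_MTX_DECLARE initializer is not usable in this build. */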
 658  	lck_mtx_init(&host_statistics_lck, LCK_GRP_NULL, LCK_ATTR_NULL);
 659  #endif // __DARLING__
 660  }
 661  
 662  static void
 663  cache_host_statistics(int index, host_info64_t info)
 664  {
 665  	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
 666  		return;
 667  	}
 668  
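      	/* Platform binaries are never rate-limited, so their results need not be cached. */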
 669  	task_t task = current_task();
 670  	if (task->t_flags & TF_PLATFORM) {
 671  		return;
 672  	}
 673  
 674  	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
 675  	return;
 676  }
 677  
 678  static void
 679  get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
 680  {
 681  	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
 682  		*count = 0;
 683  		return;
 684  	}
 685  
 686  	*count = g_host_stats_cache[index].count;
 687  	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
 688  }
 689  
 690  static int
 691  get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
 692  {
 693  	switch (flavor) {
 694  	case HOST_VM_INFO64:
 695  		if (!is_stat64) {
 696  			*ret = KERN_INVALID_ARGUMENT;
 697  			return -1;
 698  		}
 699  		if (*count < HOST_VM_INFO64_REV0_COUNT) {
 700  			*ret = KERN_FAILURE;
 701  			return -1;
 702  		}
 703  		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
 704  			return HOST_VM_INFO64_REV1;
 705  		}
 706  		return HOST_VM_INFO64_REV0;
 707  
 708  	case HOST_EXTMOD_INFO64:
 709  		if (!is_stat64) {
 710  			*ret = KERN_INVALID_ARGUMENT;
 711  			return -1;
 712  		}
 713  		if (*count < HOST_EXTMOD_INFO64_COUNT) {
 714  			*ret = KERN_FAILURE;
 715  			return -1;
 716  		}
 717  		return HOST_EXTMOD_INFO64_REV0;
 718  
 719  	case HOST_LOAD_INFO:
 720  		if (*count < HOST_LOAD_INFO_COUNT) {
 721  			*ret = KERN_FAILURE;
 722  			return -1;
 723  		}
 724  		return HOST_LOAD_INFO_REV0;
 725  
 726  	case HOST_VM_INFO:
 727  		if (*count < HOST_VM_INFO_REV0_COUNT) {
 728  			*ret = KERN_FAILURE;
 729  			return -1;
 730  		}
 731  		if (*count >= HOST_VM_INFO_REV2_COUNT) {
 732  			return HOST_VM_INFO_REV2;
 733  		}
 734  		if (*count >= HOST_VM_INFO_REV1_COUNT) {
 735  			return HOST_VM_INFO_REV1;
 736  		}
 737  		return HOST_VM_INFO_REV0;
 738  
 739  	case HOST_CPU_LOAD_INFO:
 740  		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
 741  			*ret = KERN_FAILURE;
 742  			return -1;
 743  		}
 744  		return HOST_CPU_LOAD_INFO_REV0;
 745  
 746  	case HOST_EXPIRED_TASK_INFO:
 747  		if (*count < TASK_POWER_INFO_COUNT) {
 748  			*ret = KERN_FAILURE;
 749  			return -1;
 750  		}
 751  		if (*count >= TASK_POWER_INFO_V2_COUNT) {
 752  			return HOST_EXPIRED_TASK_INFO_REV1;
 753  		}
 754  		return HOST_EXPIRED_TASK_INFO_REV0;
 755  
 756  	default:
 757  		*ret = KERN_INVALID_ARGUMENT;
 758  		return -1;
 759  	}
 760  }
 761  
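      /*
       * Decide whether this host_statistics request must be served from the cache.
       * Returns TRUE when the calling task has exhausted its request budget for the
       * current time window; in that case *info and *count are filled from the cached
       * copy and the caller should return *ret directly.
       */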
 762  static bool
 763  rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
 764  {
 765  	task_t task = current_task();
 766  
 767  	assert(task != kernel_task);
 768  
 769  	*ret = KERN_SUCCESS;
 770  
 771  	/* Access control only for third party applications */
 772  	if (task->t_flags & TF_PLATFORM) {
 773  		return FALSE;
 774  	}
 775  
 776  	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
 777  	bool rate_limited = FALSE;
 778  	bool set_last_access = TRUE;
 779  
 780  	/* there is a cache for every flavor */
 781  	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
 782  	if (index == -1) {
 783  		goto out;
 784  	}
 785  
 786  	*pindex = index;
 787  	lck_mtx_lock(&host_statistics_lck);
 788  	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
 789  		set_last_access = FALSE;
 790  		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
 791  			rate_limited = TRUE;
 792  			get_cached_info(index, info, count);
 793  		}
 794  	}
 795  	if (set_last_access) {
 796  		g_host_stats_cache[index].current_requests = 1;
 797  		/*
 798  		 * Select a random number of allowed requests (between HOST_STATISTICS_MIN_REQUESTS
 799  		 * and HOST_STATISTICS_MAX_REQUESTS) for this window.
 800  		 * This makes it impossible to infer, by watching when the cached copy changes,
 801  		 * whether host_statistics was called during the previous window.
 802  		 */
 803  		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
 804  		g_host_stats_cache[index].last_access = mach_continuous_time();
 805  	}
 806  	lck_mtx_unlock(&host_statistics_lck);
 807  out:
 808  	return rate_limited;
 809  }
 810  
 811  #ifndef __DARLING__
 812  kern_return_t
 813  vm_stats(void *info, unsigned int *count)
 814  {
 815  	vm_statistics64_data_t host_vm_stat;
 816  	mach_msg_type_number_t original_count;
 817  	unsigned int local_q_internal_count;
 818  	unsigned int local_q_external_count;
 819  
 820  	if (*count < HOST_VM_INFO64_REV0_COUNT) {
 821  		return KERN_FAILURE;
 822  	}
 823  	get_host_vm_stats(&host_vm_stat);
 824  
 825  	vm_statistics64_t stat = (vm_statistics64_t)info;
 826  
 827  	stat->free_count = vm_page_free_count + vm_page_speculative_count;
 828  	stat->active_count = vm_page_active_count;
 829  
 830  	local_q_internal_count = 0;
 831  	local_q_external_count = 0;
 832  	if (vm_page_local_q) {
 833  		zpercpu_foreach(lq, vm_page_local_q) {
 834  			stat->active_count += lq->vpl_count;
 835  			local_q_internal_count += lq->vpl_internal_count;
 836  			local_q_external_count += lq->vpl_external_count;
 837  		}
 838  	}
 839  	stat->inactive_count = vm_page_inactive_count;
 840  #if !XNU_TARGET_OS_OSX
 841  	stat->wire_count = vm_page_wire_count;
 842  #else /* !XNU_TARGET_OS_OSX */
 843  	stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
 844  #endif /* !XNU_TARGET_OS_OSX */
 845  	stat->zero_fill_count = host_vm_stat.zero_fill_count;
 846  	stat->reactivations = host_vm_stat.reactivations;
 847  	stat->pageins = host_vm_stat.pageins;
 848  	stat->pageouts = host_vm_stat.pageouts;
 849  	stat->faults = host_vm_stat.faults;
 850  	stat->cow_faults = host_vm_stat.cow_faults;
 851  	stat->lookups = host_vm_stat.lookups;
 852  	stat->hits = host_vm_stat.hits;
 853  
 854  	stat->purgeable_count = vm_page_purgeable_count;
 855  	stat->purges = vm_page_purged_count;
 856  
 857  	stat->speculative_count = vm_page_speculative_count;
 858  
 859  	/*
 860  	 * Fill in extra info added in later revisions of the
 861  	 * vm_statistics data structure.  Fill in only what can fit
 862  	 * in the data structure the caller gave us !
 863  	 */
 864  	original_count = *count;
 865  	*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
 866  	if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
 867  		/* rev1 added "throttled count" */
 868  		stat->throttled_count = vm_page_throttled_count;
 869  		/* rev1 added "compression" info */
 870  		stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
 871  		stat->compressions = host_vm_stat.compressions;
 872  		stat->decompressions = host_vm_stat.decompressions;
 873  		stat->swapins = host_vm_stat.swapins;
 874  		stat->swapouts = host_vm_stat.swapouts;
 875  		/* rev1 added:
 876  		 * "external page count"
 877  		 * "anonymous page count"
 878  		 * "total # of pages (uncompressed) held in the compressor"
 879  		 */
 880  		stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
 881  		stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
 882  		stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
 883  		*count = HOST_VM_INFO64_REV1_COUNT;
 884  	}
 885  
 886  	return KERN_SUCCESS;
 887  }
 888  #endif // __DARLING__
 889  
 890  kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count);
 891  
 892  kern_return_t
 893  host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
 894  {
 895  	if (host == HOST_NULL) {
 896  		return KERN_INVALID_HOST;
 897  	}
 898  
 899  	switch (flavor) {
 900  	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
 901  		return vm_stats(info, count);
 902  
 903  	case HOST_EXTMOD_INFO64: /* We were asked to get vm_statistics64 */
 904  	{
 905  		vm_extmod_statistics_t out_extmod_statistics;
 906  
 907  		if (*count < HOST_EXTMOD_INFO64_COUNT) {
 908  			return KERN_FAILURE;
 909  		}
 910  
 911  		out_extmod_statistics = (vm_extmod_statistics_t)info;
 912  		*out_extmod_statistics = host_extmod_statistics;
 913  
 914  		*count = HOST_EXTMOD_INFO64_COUNT;
 915  
 916  		return KERN_SUCCESS;
 917  	}
 918  
 919  	default: /* If we didn't recognize the flavor, send to host_statistics */
 920  		return host_statistics(host, flavor, (host_info_t)info, count);
 921  	}
 922  }
 923  
 924  kern_return_t
 925  host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
 926  {
 927  	kern_return_t ret = KERN_SUCCESS;
 928  	int index;
 929  
 930  	if (host == HOST_NULL) {
 931  		return KERN_INVALID_HOST;
 932  	}
 933  
 934  	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
 935  		return ret;
 936  	}
 937  
 938  	if (ret != KERN_SUCCESS) {
 939  		return ret;
 940  	}
 941  
 942  	ret = host_statistics64(host, flavor, info, count);
 943  
 944  	if (ret == KERN_SUCCESS) {
 945  		cache_host_statistics(index, info);
 946  	}
 947  
 948  	return ret;
 949  }
 950  
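      /*
       * Example (user space, illustrative only): querying HOST_VM_INFO64 through the
       * MIG-generated wrapper, which lands in host_statistics64_from_user() above.
       *
       *   vm_statistics64_data_t vmstat;
       *   mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
       *   kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
       *       (host_info64_t)&vmstat, &count);
       */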
 951  kern_return_t
 952  host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
 953  {
 954  	kern_return_t ret = KERN_SUCCESS;
 955  	int index;
 956  
 957  	if (host == HOST_NULL) {
 958  		return KERN_INVALID_HOST;
 959  	}
 960  
 961  	if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
 962  		return ret;
 963  	}
 964  
 965  	if (ret != KERN_SUCCESS) {
 966  		return ret;
 967  	}
 968  
 969  	ret = host_statistics(host, flavor, info, count);
 970  
 971  	if (ret == KERN_SUCCESS) {
 972  		cache_host_statistics(index, info);
 973  	}
 974  
 975  	return ret;
 976  }
 977  
 978  /*
 979   * Get host statistics that require privilege.
 980   * None for now, just call the un-privileged version.
 981   */
 982  kern_return_t
 983  host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
 984  {
 985  	return host_statistics((host_t)host_priv, flavor, info, count);
 986  }
 987  
 988  kern_return_t
 989  set_sched_stats_active(boolean_t active)
 990  {
 991  	sched_stats_active = active;
 992  	return KERN_SUCCESS;
 993  }
 994  
 995  kern_return_t
 996  get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
 997  {
 998  	uint32_t pos = 0;
 999  
1000  	if (!sched_stats_active) {
1001  		return KERN_FAILURE;
1002  	}
1003  
1004  	percpu_foreach_base(pcpu_base) {
1005  		struct sched_statistics stats;
1006  		processor_t processor;
1007  
1008  		pos += sizeof(struct _processor_statistics_np);
1009  		if (pos > *count) {
1010  			return KERN_FAILURE;
1011  		}
1012  
1013  		stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
1014  		processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);
1015  
1016  		out->ps_cpuid = processor->cpu_id;
1017  		out->ps_csw_count = stats.csw_count;
1018  		out->ps_preempt_count = stats.preempt_count;
1019  		out->ps_preempted_rt_count = stats.preempted_rt_count;
1020  		out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
1021  		out->ps_rt_sched_count = stats.rt_sched_count;
1022  		out->ps_interrupt_count = stats.interrupt_count;
1023  		out->ps_ipi_count = stats.ipi_count;
1024  		out->ps_timer_pop_count = stats.timer_pop_count;
1025  		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
1026  		out->ps_idle_transitions = stats.idle_transitions;
1027  		out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;
1028  
1029  		out++;
1030  	}
1031  
1032  	/* And include RT Queue information */
1033  	pos += sizeof(struct _processor_statistics_np);
1034  	if (pos > *count) {
1035  		return KERN_FAILURE;
1036  	}
1037  
1038  	bzero(out, sizeof(*out));
1039  	out->ps_cpuid = (-1);
1040  	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
1041  	out++;
1042  
1043  	*count = pos;
1044  
1045  	return KERN_SUCCESS;
1046  }
1047  
1048  kern_return_t
1049  host_page_size(host_t host, vm_size_t * out_page_size)
1050  {
1051  	if (host == HOST_NULL) {
1052  		return KERN_INVALID_ARGUMENT;
1053  	}
1054  
1055  	*out_page_size = PAGE_SIZE;
1056  
1057  	return KERN_SUCCESS;
1058  }
1059  
1060  /*
1061   *	Return kernel version string (more than you ever
1062   *	wanted to know about what version of the kernel this is).
1063   */
1064  extern char version[];
1065  
1066  kern_return_t
1067  host_kernel_version(host_t host, kernel_version_t out_version)
1068  {
1069  	if (host == HOST_NULL) {
1070  		return KERN_INVALID_ARGUMENT;
1071  	}
1072  
1073  	(void)strncpy(out_version, version, sizeof(kernel_version_t));
1074  
1075  	return KERN_SUCCESS;
1076  }
1077  
1078  /*
1079   *	host_processor_sets:
1080   *
1081   *	List all processor sets on the host.
1082   */
1083  kern_return_t
1084  host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
1085  {
1086  	void * addr;
1087  
1088  	if (host_priv == HOST_PRIV_NULL) {
1089  		return KERN_INVALID_ARGUMENT;
1090  	}
1091  
1092  	/*
1093  	 *	Allocate memory.  Can be pageable because it won't be
1094  	 *	touched while holding a lock.
1095  	 */
1096  
1097  	addr = kalloc((vm_size_t)sizeof(mach_port_t));
1098  	if (addr == 0) {
1099  		return KERN_RESOURCE_SHORTAGE;
1100  	}
1101  
1102  	/* do the conversion that Mig should handle */
1103  	*((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);
1104  
1105  	*pset_list = (processor_set_array_t)addr;
1106  	*count = 1;
1107  
1108  	return KERN_SUCCESS;
1109  }
1110  
1111  /*
1112   *	host_processor_set_priv:
1113   *
1114   *	Return control port for given processor set.
1115   */
1116  kern_return_t
1117  host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
1118  {
1119  	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
1120  		*pset = PROCESSOR_SET_NULL;
1121  
1122  		return KERN_INVALID_ARGUMENT;
1123  	}
1124  
1125  	*pset = pset_name;
1126  
1127  	return KERN_SUCCESS;
1128  }
1129  
1130  /*
1131   *	host_processor_info
1132   *
1133   *	Return info about the processors on this host.  It will return
1134   *	the number of processors, and the specific type of info requested
1135   *	in an OOL array.
1136   */
1137  kern_return_t
1138  host_processor_info(host_t host,
1139      processor_flavor_t flavor,
1140      natural_t * out_pcount,
1141      processor_info_array_t * out_array,
1142      mach_msg_type_number_t * out_array_count)
1143  {
1144  	kern_return_t result;
1145  	host_t thost;
1146  	processor_info_t info;
1147  	unsigned int icount;
1148  	unsigned int pcount;
1149  	vm_offset_t addr;
1150  	vm_size_t size, needed;
1151  	vm_map_copy_t copy;
1152  
1153  	if (host == HOST_NULL) {
1154  		return KERN_INVALID_ARGUMENT;
1155  	}
1156  
1157  	result = processor_info_count(flavor, &icount);
1158  	if (result != KERN_SUCCESS) {
1159  		return result;
1160  	}
1161  
1162  	pcount = processor_count;
1163  	assert(pcount != 0);
1164  
1165  	needed = pcount * icount * sizeof(natural_t);
1166  	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
1167  	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
1168  	if (result != KERN_SUCCESS) {
1169  		return KERN_RESOURCE_SHORTAGE;
1170  	}
1171  
1172  	info = (processor_info_t)addr;
1173  
1174  	for (unsigned int i = 0; i < pcount; i++) {
1175  		processor_t processor = processor_array[i];
1176  		assert(processor != PROCESSOR_NULL);
1177  
1178  		unsigned int tcount = icount;
1179  
1180  		result = processor_info(processor, flavor, &thost, info, &tcount);
1181  		if (result != KERN_SUCCESS) {
1182  			kmem_free(ipc_kernel_map, addr, size);
1183  			return result;
1184  		}
1185  		info += icount;
1186  	}
1187  
1188  	if (size != needed) {
1189  		bzero((char *)addr + needed, size - needed);
1190  	}
1191  
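      	/* Unwire the kernel buffer and return it to user space as out-of-line data via a vm_map copy object. */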
1192  	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
1193  	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
1194  	assert(result == KERN_SUCCESS);
1195  	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
1196  	assert(result == KERN_SUCCESS);
1197  
1198  	*out_pcount = pcount;
1199  	*out_array = (processor_info_array_t)copy;
1200  	*out_array_count = pcount * icount;
1201  
1202  	return KERN_SUCCESS;
1203  }
1204  
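      /*
       * A special port id is valid if it lies within [HOST_MIN_SPECIAL_PORT, HOST_MAX_SPECIAL_PORT]
       * and does not fall in the reserved gap between the kernel-owned and user-owned ranges.
       */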
1205  static bool
1206  is_valid_host_special_port(int id)
1207  {
1208  	return (id <= HOST_MAX_SPECIAL_PORT) &&
1209  	       (id >= HOST_MIN_SPECIAL_PORT) &&
1210  	       ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
1211  }
1212  
1213  extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
1214  
1215  /*
1216   *      Kernel interface for setting a special port.
1217   */
1218  kern_return_t
1219  kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
1220  {
1221  	ipc_port_t old_port;
1222  
1223  	if (!is_valid_host_special_port(id)) {
1224  		panic("attempted to set invalid special port %d", id);
1225  	}
1226  
1227  #if !MACH_FLIPC
1228  	if (id == HOST_NODE_PORT) {
1229  		return KERN_NOT_SUPPORTED;
1230  	}
1231  #endif
1232  
1233  	host_lock(host_priv);
1234  	old_port = host_priv->special[id];
1235  #ifndef __DARLING__
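      	/* Only the init process (initproc / launchd) may replace the amfid special port. */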
1236  	if ((id == HOST_AMFID_PORT) && (current_task()->bsd_info != initproc)) {
1237  		host_unlock(host_priv);
1238  		return KERN_NO_ACCESS;
1239  	}
1240  #endif // __DARLING__
1241  	host_priv->special[id] = port;
1242  	host_unlock(host_priv);
1243  
1244  #if MACH_FLIPC
1245  	if (id == HOST_NODE_PORT) {
1246  		mach_node_port_changed();
1247  	}
1248  #endif
1249  
1250  	if (IP_VALID(old_port)) {
1251  		ipc_port_release_send(old_port);
1252  	}
1253  	return KERN_SUCCESS;
1254  }
1255  
1256  /*
1257   *      Kernel interface for retrieving a special port.
1258   */
1259  kern_return_t
1260  kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
1261  {
1262  	if (!is_valid_host_special_port(id)) {
1263  		panic("attempted to get invalid special port %d", id);
1264  	}
1265  
1266  	host_lock(host_priv);
1267  	*portp = host_priv->special[id];
1268  	host_unlock(host_priv);
1269  	return KERN_SUCCESS;
1270  }
1271  
1272  /*
1273   *      User interface for setting a special port.
1274   *
1275   *      Only permits the user to set a user-owned special port
1276   *      ID, rejecting a kernel-owned special port ID.
1277   *
1278   *      A special kernel port cannot be set up using this
1279   *      routine; use kernel_set_special_port() instead.
1280   */
1281  kern_return_t
1282  host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
1283  {
1284  	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
1285  		return KERN_INVALID_ARGUMENT;
1286  	}
1287  
1288  	if (task_is_driver(current_task())) {
1289  		return KERN_NO_ACCESS;
1290  	}
1291  
1292  	if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
1293  		return KERN_INVALID_RIGHT;
1294  	}
1295  
1296  	return host_set_special_port(host_priv, id, port);
1297  }
1298  
1299  kern_return_t
1300  host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
1301  {
1302  	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
1303  		return KERN_INVALID_ARGUMENT;
1304  	}
1305  
1306  #if CONFIG_MACF
1307  	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
1308  		return KERN_NO_ACCESS;
1309  	}
1310  #endif
1311  
1312  	return kernel_set_special_port(host_priv, id, port);
1313  }
1314  
1315  /*
1316   *      User interface for retrieving a special port.
1317   *
1318   *      Note that there is nothing to prevent a user special
1319   *      port from disappearing after it has been discovered by
1320   *      the caller; thus, using a special port can always result
1321   *      in a "port not valid" error.
1322   */
1323  
1324  kern_return_t
1325  host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
1326  {
1327  	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
1328  		return KERN_INVALID_ARGUMENT;
1329  	}
1330  
1331  	task_t task = current_task();
1332  	if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
1333  		/* allow HID drivers to get the sysdiagnose port for keychord handling */
1334  		if (id == HOST_SYSDIAGNOSE_PORT &&
1335  		    IOTaskHasEntitlement(task, kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
1336  			goto get_special_port;
1337  		}
1338  		return KERN_NO_ACCESS;
1339  	}
1340  get_special_port:
1341  	return host_get_special_port(host_priv, node, id, portp);
1342  }
1343  
1344  kern_return_t
1345  host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
1346  {
1347  	ipc_port_t port;
1348  
1349  	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
1350  		return KERN_INVALID_ARGUMENT;
1351  	}
1352  
1353  	host_lock(host_priv);
1354  	port = realhost.special[id];
1355  	*portp = ipc_port_copy_send(port);
1356  	host_unlock(host_priv);
1357  
1358  	return KERN_SUCCESS;
1359  }
1360  
1361  /*
1362   *	host_get_io_master
1363   *
1364   *	Return the IO master access port for this host.
1365   */
1366  kern_return_t
1367  host_get_io_master(host_t host, io_master_t * io_masterp)
1368  {
1369  	if (host == HOST_NULL) {
1370  		return KERN_INVALID_ARGUMENT;
1371  	}
1372  
1373  	return host_get_io_master_port(host_priv_self(), io_masterp);
1374  }
1375  
1376  host_t
1377  host_self(void)
1378  {
1379  	return &realhost;
1380  }
1381  
1382  host_priv_t
1383  host_priv_self(void)
1384  {
1385  	return &realhost;
1386  }
1387  
1388  host_security_t
1389  host_security_self(void)
1390  {
1391  	return &realhost;
1392  }
1393  
1394  kern_return_t
1395  host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
1396  {
1397  	if (host == HOST_NULL) {
1398  		return KERN_INVALID_ARGUMENT;
1399  	}
1400  
1401  	if (!IOTaskHasEntitlement(current_task(), "com.apple.private.set-atm-diagnostic-flag")) {
1402  		return KERN_NO_ACCESS;
1403  	}
1404  
1405  #if CONFIG_ATM
1406  	return atm_set_diagnostic_config(diagnostic_flag);
1407  #else
1408  	(void)diagnostic_flag;
1409  	return KERN_NOT_SUPPORTED;
1410  #endif
1411  }
1412  
1413  kern_return_t
1414  host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
1415  {
1416  #if !defined(XNU_TARGET_OS_OSX)
1417  	if (host_priv == HOST_PRIV_NULL) {
1418  		return KERN_INVALID_ARGUMENT;
1419  	}
1420  
1421  #ifndef __DARLING__
1422  	/*
1423  	 * Always enforce that the multiuser bit is set
1424  	 * if a value is written to the commpage word.
1425  	 */
1426  	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
1427  #endif // __DARLING__
1428  	return KERN_SUCCESS;
1429  #else
1430  	(void)host_priv;
1431  	(void)multiuser_config;
1432  	return KERN_NOT_SUPPORTED;
1433  #endif
1434  }