/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/policy_internal.h>
#include <mach/task_policy.h>

#include <mach/mach_types.h>
#include <mach/task_server.h>

#include <kern/host.h>                  /* host_priv_self()        */
#include <mach/host_priv.h>             /* host_get_special_port() */
#include <mach/host_special_ports.h>    /* RESOURCE_NOTIFY_PORT    */
#include <kern/sched.h>
#include <kern/task.h>
#include <mach/thread_policy.h>
#include <sys/errno.h>
#include <sys/resource.h>
#include <machine/limits.h>
#include <kern/ledger.h>
#include <kern/thread_call.h>
#include <kern/sfi.h>
#include <kern/coalition.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#if !defined(XNU_TARGET_OS_OSX)
#include <kern/kalloc.h>
#include <sys/errno.h>
#endif /* !defined(XNU_TARGET_OS_OSX) */

#if IMPORTANCE_INHERITANCE
#include <ipc/ipc_importance.h>
#if IMPORTANCE_TRACE
#include <mach/machine/sdt.h>
#endif /* IMPORTANCE_TRACE */
#endif /* IMPORTANCE_INHERITANCE */

#include <sys/kdebug.h>

/*
 *  Task Policy
 *
 *  This subsystem manages task and thread IO priority and backgrounding,
 *  as well as importance inheritance, process suppression, task QoS, and apptype.
 *  These properties have a surprising number of complex interactions, so they are
 *  centralized here in one state machine to simplify the implementation of those interactions.
 *
 *  Architecture:
 *  Threads and tasks have two policy fields: requested, effective.
 *  Requested represents the wishes of each interface that influences task policy.
 *  Effective represents the distillation of that policy into a set of behaviors.
 *
 *  Each thread making a modification in the policy system passes a 'pending' struct,
 *  which tracks updates that will be applied after dropping the policy engine lock.
 *
 *  Each interface that has an input into the task policy state machine controls a field in requested.
 *  If the interface has a getter, it returns what is in the field in requested, but that is
 *  not necessarily what is actually in effect.
 *
 *  All kernel subsystems that behave differently based on task policy call into
 *  the proc_get_effective_(task|thread)_policy functions, which return the decision of the task policy state machine
 *  for that subsystem by querying only the 'effective' field.
 *
 *  Policy change operations:
 *  Here are the steps to change a policy on a task or thread:
 *  1) Lock task
 *  2) Change requested field for the relevant policy
 *  3) Run a task policy update, which recalculates effective based on requested,
 *     then takes a diff between the old and new versions of requested and calls the relevant
 *     other subsystems to apply these changes, and updates the pending field.
 *  4) Unlock task
 *  5) Run task policy update complete, which looks at the pending field to update
 *     subsystems which cannot be touched while holding the task lock.
 *
 *  To add a new requested policy, add the field in the requested struct, the flavor in task.h,
 *  the setter and getter in proc_(set|get)_task_policy*,
 *  then set up the effects of that behavior in task_policy_update*. If the policy manifests
 *  itself as a distinct effective policy, add it to the effective struct and add it to the
 *  proc_get_effective_task_policy accessor.
 *
 *  Most policies are set via proc_set_task_policy, but policies that don't fit that interface
 *  roll their own lock/set/update/unlock/complete code inside this file.
 *
 *
 *  Suppression policy
 *
 *  These are a set of behaviors that can be requested for a task.  They currently have specific
 *  implied actions when they're enabled, but they may be made customizable in the future.
 *
 *  When the affected task is boosted, we temporarily disable the suppression behaviors
 *  so that the affected process has a chance to run so it can call the API to permanently
 *  disable the suppression behaviors.
 *
 *  Locking
 *
 *  Changing task policy on a task takes the task lock.
 *  Changing task policy on a thread takes the thread mutex.
 *  Task policy changes that affect threads will take each thread's mutex to update it if necessary.
 *
 *  Querying the effective policy does not take a lock, because callers
 *  may run in interrupt context or other places where locks are not OK.
 *
 *  This means that any notification of state change needs to be externally synchronized.
 *  We do this by idempotent callouts after the state has changed to ask
 *  other subsystems to update their view of the world.
 *
 * TODO: Move all cpu/wakes/io monitor code into a separate file
 * TODO: Move all importance code over to importance subsystem
 * TODO: Move all taskwatch code into a separate file
 * TODO: Move all VM importance code into a separate file
 */
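
/*
 * Illustrative sketch (not compiled, not part of the original file): the
 * five-step sequence above as it is packaged by proc_set_task_policy()
 * later in this file, here with the externally-visible DARWIN_BG flavor
 * as the example input.
 */
#if 0
static void
example_background_a_task(task_t task)
{
	/*
	 * Steps 1-5 happen inside this one call: lock task, set
	 * requested.trp_ext_darwinbg, recompute effective, unlock, then run
	 * the pended completion work (e.g. socket updates).
	 */
	proc_set_task_policy(task, TASK_POLICY_EXTERNAL, TASK_POLICY_DARWIN_BG, 1);
}
#endif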

/* Task policy related helper functions */
static void proc_set_task_policy_locked(task_t task, int category, int flavor, int value, int value2);

static void task_policy_update_locked(task_t task, task_pend_token_t pend_token);
static void task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token);

/* For attributes that have two scalars as input/output */
static void proc_set_task_policy2(task_t task, int category, int flavor, int value1, int value2);
static void proc_get_task_policy2(task_t task, int category, int flavor, int *value1, int *value2);

static boolean_t task_policy_update_coalition_focal_tasks(task_t task, int prev_role, int next_role, task_pend_token_t pend_token);

static uint64_t task_requested_bitfield(task_t task);
static uint64_t task_effective_bitfield(task_t task);

/* Convenience functions for munging a policy bitfield into a tracepoint */
static uintptr_t trequested_0(task_t task);
static uintptr_t trequested_1(task_t task);
static uintptr_t teffective_0(task_t task);
static uintptr_t teffective_1(task_t task);

/* CPU limits helper functions */
static int task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int entitled);
static int task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope);
static int task_enable_cpumon_locked(task_t task);
static int task_disable_cpumon(task_t task);
static int task_clear_cpuusage_locked(task_t task, int cpumon_entitled);
static int task_apply_resource_actions(task_t task, int type);
static void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t param1);

#ifdef MACH_BSD
typedef struct proc *   proc_t;
int                     proc_pid(struct proc *proc);
extern int              proc_selfpid(void);
extern char *           proc_name_address(void *p);
extern char *           proc_best_name(proc_t proc);

extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg,
    char *buffer, uint32_t buffersize,
    int32_t *retval);
#endif /* MACH_BSD */


#if CONFIG_TASKWATCH
/* Taskwatch related helper functions */
static void set_thread_appbg(thread_t thread, int setbg, int importance);
static void add_taskwatch_locked(task_t task, task_watch_t * twp);
static void remove_taskwatch_locked(task_t task, task_watch_t * twp);
static void task_watch_lock(void);
static void task_watch_unlock(void);
static void apply_appstate_watchers(task_t task);

typedef struct task_watcher {
	queue_chain_t   tw_links;       /* queueing of threads */
	task_t          tw_task;        /* task that is being watched */
	thread_t        tw_thread;      /* thread that is watching the watch_task */
	int             tw_state;       /* the current app state of the thread */
	int             tw_importance;  /* importance prior to backgrounding */
} task_watch_t;

typedef struct thread_watchlist {
	thread_t        thread;         /* thread being worked on for taskwatch action */
	int             importance;     /* importance to be restored if thread is being made active */
} thread_watchlist_t;

#endif /* CONFIG_TASKWATCH */

extern int memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap);

/* Importance Inheritance related helper functions */

#if IMPORTANCE_INHERITANCE

static void task_importance_mark_live_donor(task_t task, boolean_t donating);
static void task_importance_mark_receiver(task_t task, boolean_t receiving);
static void task_importance_mark_denap_receiver(task_t task, boolean_t denap);

static boolean_t task_is_marked_live_importance_donor(task_t task);
static boolean_t task_is_importance_receiver(task_t task);
static boolean_t task_is_importance_denap_receiver(task_t task);

static int task_importance_hold_internal_assertion(task_t target_task, uint32_t count);

static void task_add_importance_watchport(task_t task, mach_port_t port, int *boostp);
static void task_importance_update_live_donor(task_t target_task);

static void task_set_boost_locked(task_t task, boolean_t boost_active);

#endif /* IMPORTANCE_INHERITANCE */

#if IMPORTANCE_TRACE
#define __imptrace_only
#else /* IMPORTANCE_TRACE */
#define __imptrace_only __unused
#endif /* IMPORTANCE_TRACE */

#if IMPORTANCE_INHERITANCE
#define __imp_only
#else
#define __imp_only __unused
#endif

/*
 * Default parameters for certain policies
 */

int proc_standard_daemon_tier = THROTTLE_LEVEL_TIER1;
int proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER1;
int proc_tal_disk_tier        = THROTTLE_LEVEL_TIER1;

int proc_graphics_timer_qos   = (LATENCY_QOS_TIER_0 & 0xFF);

const int proc_default_bg_iotier  = THROTTLE_LEVEL_TIER2;

/* Latency/throughput QoS fields remain zeroed, i.e. TIER_UNSPECIFIED at creation */
const struct task_requested_policy default_task_requested_policy = {
	.trp_bg_iotier = proc_default_bg_iotier
};
const struct task_effective_policy default_task_effective_policy = {};

/*
 * Default parameters for CPU usage monitor.
 *
 * Default setting is 50% over 3 minutes.
 */
#define         DEFAULT_CPUMON_PERCENTAGE 50
#define         DEFAULT_CPUMON_INTERVAL   (3 * 60)

uint8_t         proc_max_cpumon_percentage;
uint64_t        proc_max_cpumon_interval;


kern_return_t
qos_latency_policy_validate(task_latency_qos_t ltier)
{
	if ((ltier != LATENCY_QOS_TIER_UNSPECIFIED) &&
	    ((ltier > LATENCY_QOS_TIER_5) || (ltier < LATENCY_QOS_TIER_0))) {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

kern_return_t
qos_throughput_policy_validate(task_throughput_qos_t ttier)
{
	if ((ttier != THROUGHPUT_QOS_TIER_UNSPECIFIED) &&
	    ((ttier > THROUGHPUT_QOS_TIER_5) || (ttier < THROUGHPUT_QOS_TIER_0))) {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

static kern_return_t
task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count)
{
	if (count < TASK_QOS_POLICY_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	task_latency_qos_t ltier = qosinfo->task_latency_qos_tier;
	task_throughput_qos_t ttier = qosinfo->task_throughput_qos_tier;

	kern_return_t kr = qos_latency_policy_validate(ltier);

	if (kr != KERN_SUCCESS) {
		return kr;
	}

	kr = qos_throughput_policy_validate(ttier);

	return kr;
}

uint32_t
qos_extract(uint32_t qv)
{
	return qv & 0xFF;
}

uint32_t
qos_latency_policy_package(uint32_t qv)
{
	return (qv == LATENCY_QOS_TIER_UNSPECIFIED) ? LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | qv);
}

uint32_t
qos_throughput_policy_package(uint32_t qv)
{
	return (qv == THROUGHPUT_QOS_TIER_UNSPECIFIED) ? THROUGHPUT_QOS_TIER_UNSPECIFIED : ((0xFE << 16) | qv);
}
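
/*
 * Illustrative sketch (not compiled, not part of the original file): the
 * packaged tier constants from <mach/task_policy.h> round-trip through the
 * helpers above; e.g. LATENCY_QOS_TIER_0 is ((0xFF << 16) | 1), so
 * qos_extract() yields the raw tier value and qos_latency_policy_package()
 * restores the packaged form.
 */
#if 0
static void
example_qos_round_trip(void)
{
	uint32_t raw = qos_extract(LATENCY_QOS_TIER_0);  /* raw tier value: 1 */
	uint32_t pkg = qos_latency_policy_package(raw);  /* back to LATENCY_QOS_TIER_0 */
	assert(pkg == LATENCY_QOS_TIER_0);
}
#endif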

#define TASK_POLICY_SUPPRESSION_DISABLE  0x1
#define TASK_POLICY_SUPPRESSION_IOTIER2  0x2
#define TASK_POLICY_SUPPRESSION_NONDONOR 0x4
/* TEMPORARY boot-arg controlling task_policy suppression (App Nap) */
static boolean_t task_policy_suppression_flags = TASK_POLICY_SUPPRESSION_IOTIER2 |
    TASK_POLICY_SUPPRESSION_NONDONOR;

kern_return_t
task_policy_set(
	task_t                                  task,
	task_policy_flavor_t    flavor,
	task_policy_t                   policy_info,
	mach_msg_type_number_t  count)
{
	kern_return_t           result = KERN_SUCCESS;

	if (task == TASK_NULL || task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case TASK_CATEGORY_POLICY: {
		task_category_policy_t info = (task_category_policy_t)policy_info;

		if (count < TASK_CATEGORY_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

#if !defined(XNU_TARGET_OS_OSX)
		/* On embedded, you can't modify your own role. */
		if (current_task() == task) {
			return KERN_INVALID_ARGUMENT;
		}
#endif

		switch (info->role) {
		case TASK_FOREGROUND_APPLICATION:
		case TASK_BACKGROUND_APPLICATION:
		case TASK_DEFAULT_APPLICATION:
			proc_set_task_policy(task,
			    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
			    info->role);
			break;

		case TASK_CONTROL_APPLICATION:
			if (task != current_task() || task->sec_token.val[0] != 0) {
				result = KERN_INVALID_ARGUMENT;
			} else {
				proc_set_task_policy(task,
				    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
				    info->role);
			}
			break;

		case TASK_GRAPHICS_SERVER:
			/* TODO: Restrict this role to FCFS <rdar://problem/12552788> */
			if (task != current_task() || task->sec_token.val[0] != 0) {
				result = KERN_INVALID_ARGUMENT;
			} else {
				proc_set_task_policy(task,
				    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
				    info->role);
			}
			break;
		default:
			result = KERN_INVALID_ARGUMENT;
			break;
		} /* switch (info->role) */

		break;
	}

/* Desired energy-efficiency/performance "quality-of-service" */
	case TASK_BASE_QOS_POLICY:
	case TASK_OVERRIDE_QOS_POLICY:
	{
		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
		kern_return_t kr = task_qos_policy_validate(qosinfo, count);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);
		uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);

		proc_set_task_policy2(task, TASK_POLICY_ATTRIBUTE,
		    flavor == TASK_BASE_QOS_POLICY ? TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS : TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS,
		    lqos, tqos);
	}
	break;

	case TASK_BASE_LATENCY_QOS_POLICY:
	{
		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
		kern_return_t kr = task_qos_policy_validate(qosinfo, count);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);

		proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_LATENCY_QOS_POLICY, lqos);
	}
	break;

	case TASK_BASE_THROUGHPUT_QOS_POLICY:
	{
		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
		kern_return_t kr = task_qos_policy_validate(qosinfo, count);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);

		proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_THROUGHPUT_QOS_POLICY, tqos);
	}
	break;

	case TASK_SUPPRESSION_POLICY:
	{
#if !defined(XNU_TARGET_OS_OSX)
		/*
		 * Suppression policy is not enabled for embedded
		 * because apps aren't marked as denap receivers
		 */
		result = KERN_INVALID_ARGUMENT;
		break;
#else /* !defined(XNU_TARGET_OS_OSX) */

		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;

		if (count < TASK_SUPPRESSION_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		struct task_qos_policy qosinfo;

		qosinfo.task_latency_qos_tier = info->timer_throttle;
		qosinfo.task_throughput_qos_tier = info->throughput_qos;

		kern_return_t kr = task_qos_policy_validate(&qosinfo, TASK_QOS_POLICY_COUNT);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* TEMPORARY disablement of task suppression */
		if (info->active &&
		    (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_DISABLE)) {
			return KERN_SUCCESS;
		}

		struct task_pend_token pend_token = {};

		task_lock(task);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_START,
		    proc_selfpid(), task_pid(task), trequested_0(task),
		    trequested_1(task), 0);

		task->requested_policy.trp_sup_active      = (info->active)         ? 1 : 0;
		task->requested_policy.trp_sup_lowpri_cpu  = (info->lowpri_cpu)     ? 1 : 0;
		task->requested_policy.trp_sup_timer       = qos_extract(info->timer_throttle);
		task->requested_policy.trp_sup_disk        = (info->disk_throttle)  ? 1 : 0;
		task->requested_policy.trp_sup_throughput  = qos_extract(info->throughput_qos);
		task->requested_policy.trp_sup_cpu         = (info->suppressed_cpu) ? 1 : 0;
		task->requested_policy.trp_sup_bg_sockets  = (info->background_sockets) ? 1 : 0;

		task_policy_update_locked(task, &pend_token);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_END,
		    proc_selfpid(), task_pid(task), trequested_0(task),
		    trequested_1(task), 0);

		task_unlock(task);

		task_policy_update_complete_unlocked(task, &pend_token);

		break;

#endif /* !defined(XNU_TARGET_OS_OSX) */
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	return result;
}
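
/*
 * Illustrative sketch (not compiled, not part of the original file): a
 * user-space client reaches the handler above through the Mach
 * task_policy_set() call; the struct and count names below come from
 * <mach/task_policy.h>.
 */
#if 0
static kern_return_t
example_set_foreground_role(task_t target_task)
{
	task_category_policy_data_t category = { .role = TASK_FOREGROUND_APPLICATION };

	return task_policy_set(target_task, TASK_CATEGORY_POLICY,
	    (task_policy_t)&category, TASK_CATEGORY_POLICY_COUNT);
}
#endif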

/* Sets BSD 'nice' value on the task */
kern_return_t
task_importance(
	task_t                          task,
	integer_t                       importance)
{
	if (task == TASK_NULL || task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_TERMINATED;
	}

	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) >= TASK_CONTROL_APPLICATION) {
		task_unlock(task);

		return KERN_INVALID_ARGUMENT;
	}

	task->importance = importance;

	struct task_pend_token pend_token = {};

	task_policy_update_locked(task, &pend_token);

	task_unlock(task);

	task_policy_update_complete_unlocked(task, &pend_token);

	return KERN_SUCCESS;
}

kern_return_t
task_policy_get(
	task_t                                  task,
	task_policy_flavor_t    flavor,
	task_policy_t                   policy_info,
	mach_msg_type_number_t  *count,
	boolean_t                               *get_default)
{
	if (task == TASK_NULL || task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case TASK_CATEGORY_POLICY:
	{
		task_category_policy_t          info = (task_category_policy_t)policy_info;

		if (*count < TASK_CATEGORY_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (*get_default) {
			info->role = TASK_UNSPECIFIED;
		} else {
			info->role = proc_get_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
		}
		break;
	}

	case TASK_BASE_QOS_POLICY: /* FALLTHRU */
	case TASK_OVERRIDE_QOS_POLICY:
	{
		task_qos_policy_t info = (task_qos_policy_t)policy_info;

		if (*count < TASK_QOS_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (*get_default) {
			info->task_latency_qos_tier = LATENCY_QOS_TIER_UNSPECIFIED;
			info->task_throughput_qos_tier = THROUGHPUT_QOS_TIER_UNSPECIFIED;
		} else if (flavor == TASK_BASE_QOS_POLICY) {
			int value1, value2;

			proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);

			info->task_latency_qos_tier = qos_latency_policy_package(value1);
			info->task_throughput_qos_tier = qos_throughput_policy_package(value2);
		} else if (flavor == TASK_OVERRIDE_QOS_POLICY) {
			int value1, value2;

			proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);

			info->task_latency_qos_tier = qos_latency_policy_package(value1);
			info->task_throughput_qos_tier = qos_throughput_policy_package(value2);
		}

		break;
	}

	case TASK_POLICY_STATE:
	{
		task_policy_state_t info = (task_policy_state_t)policy_info;

		if (*count < TASK_POLICY_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Only root can get this info */
		if (current_task()->sec_token.val[0] != 0) {
			return KERN_PROTECTION_FAILURE;
		}

		if (*get_default) {
			info->requested = 0;
			info->effective = 0;
			info->pending = 0;
			info->imp_assertcnt = 0;
			info->imp_externcnt = 0;
			info->flags = 0;
			info->imp_transitions = 0;
		} else {
			task_lock(task);

			info->requested = task_requested_bitfield(task);
			info->effective = task_effective_bitfield(task);
			info->pending   = 0;

			info->tps_requested_policy = *(uint64_t*)(&task->requested_policy);
			info->tps_effective_policy = *(uint64_t*)(&task->effective_policy);

			info->flags = 0;
			if (task->task_imp_base != NULL) {
				info->imp_assertcnt = task->task_imp_base->iit_assertcnt;
				info->imp_externcnt = IIT_EXTERN(task->task_imp_base);
				info->flags |= (task_is_marked_importance_receiver(task) ? TASK_IMP_RECEIVER : 0);
				info->flags |= (task_is_marked_importance_denap_receiver(task) ? TASK_DENAP_RECEIVER : 0);
				info->flags |= (task_is_marked_importance_donor(task) ? TASK_IMP_DONOR : 0);
				info->flags |= (task_is_marked_live_importance_donor(task) ? TASK_IMP_LIVE_DONOR : 0);
				info->flags |= (get_task_pidsuspended(task) ? TASK_IS_PIDSUSPENDED : 0);
				info->imp_transitions = task->task_imp_base->iit_transitions;
			} else {
				info->imp_assertcnt = 0;
				info->imp_externcnt = 0;
				info->imp_transitions = 0;
			}
			task_unlock(task);
		}

		break;
	}

	case TASK_SUPPRESSION_POLICY:
	{
		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;

		if (*count < TASK_SUPPRESSION_POLICY_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		task_lock(task);

		if (*get_default) {
			info->active            = 0;
			info->lowpri_cpu        = 0;
			info->timer_throttle    = LATENCY_QOS_TIER_UNSPECIFIED;
			info->disk_throttle     = 0;
			info->cpu_limit         = 0;
			info->suspend           = 0;
			info->throughput_qos    = 0;
			info->suppressed_cpu    = 0;
		} else {
			info->active            = task->requested_policy.trp_sup_active;
			info->lowpri_cpu        = task->requested_policy.trp_sup_lowpri_cpu;
			info->timer_throttle    = qos_latency_policy_package(task->requested_policy.trp_sup_timer);
			info->disk_throttle     = task->requested_policy.trp_sup_disk;
			info->cpu_limit         = 0;
			info->suspend           = 0;
			info->throughput_qos    = qos_throughput_policy_package(task->requested_policy.trp_sup_throughput);
			info->suppressed_cpu    = task->requested_policy.trp_sup_cpu;
			info->background_sockets = task->requested_policy.trp_sup_bg_sockets;
		}

		task_unlock(task);
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}
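
/*
 * Illustrative sketch (not compiled, not part of the original file):
 * querying the requested (not effective) role via the getter above;
 * passing get_default = TRUE would return the flavor's default values
 * instead.
 */
#if 0
static void
example_query_role(task_t target_task)
{
	task_category_policy_data_t category;
	mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT;
	boolean_t get_default = FALSE;

	if (task_policy_get(target_task, TASK_CATEGORY_POLICY,
	    (task_policy_t)&category, &count, &get_default) == KERN_SUCCESS) {
		/* category.role now holds the task's requested role */
	}
}
#endif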

/*
 * Called at task creation
 * We calculate the correct effective but don't apply it to anything yet.
 * The threads, etc will inherit from the task as they get created.
 */
void
task_policy_create(task_t task, task_t parent_task)
{
	task->requested_policy.trp_apptype          = parent_task->requested_policy.trp_apptype;

	task->requested_policy.trp_int_darwinbg     = parent_task->requested_policy.trp_int_darwinbg;
	task->requested_policy.trp_ext_darwinbg     = parent_task->requested_policy.trp_ext_darwinbg;
	task->requested_policy.trp_int_iotier       = parent_task->requested_policy.trp_int_iotier;
	task->requested_policy.trp_ext_iotier       = parent_task->requested_policy.trp_ext_iotier;
	task->requested_policy.trp_int_iopassive    = parent_task->requested_policy.trp_int_iopassive;
	task->requested_policy.trp_ext_iopassive    = parent_task->requested_policy.trp_ext_iopassive;
	task->requested_policy.trp_bg_iotier        = parent_task->requested_policy.trp_bg_iotier;
	task->requested_policy.trp_terminated       = parent_task->requested_policy.trp_terminated;
	task->requested_policy.trp_qos_clamp        = parent_task->requested_policy.trp_qos_clamp;

	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && !task_is_exec_copy(task)) {
		/* Do not update the apptype for exec copy task */
		if (parent_task->requested_policy.trp_boosted) {
			task->requested_policy.trp_apptype = TASK_APPTYPE_DAEMON_INTERACTIVE;
			task_importance_mark_donor(task, TRUE);
		} else {
			task->requested_policy.trp_apptype = TASK_APPTYPE_DAEMON_BACKGROUND;
			task_importance_mark_receiver(task, FALSE);
		}
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_START,
	    task_pid(task), teffective_0(task),
	    teffective_1(task), task->priority, 0);

	task_policy_update_internal_locked(task, true, NULL);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END,
	    task_pid(task), teffective_0(task),
	    teffective_1(task), task->priority, 0);

	task_importance_update_live_donor(task);
}


static void
task_policy_update_locked(task_t task, task_pend_token_t pend_token)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK) | DBG_FUNC_START),
	    task_pid(task), teffective_0(task),
	    teffective_1(task), task->priority, 0);

	task_policy_update_internal_locked(task, false, pend_token);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK)) | DBG_FUNC_END,
	    task_pid(task), teffective_0(task),
	    teffective_1(task), task->priority, 0);
}

/*
 * One state update function TO RULE THEM ALL
 *
 * This function updates the task or thread effective policy fields
 * and pushes the results to the relevant subsystems.
 *
 * Must call update_complete after unlocking the task,
 * as some subsystems cannot be updated while holding the task lock.
 *
 * Called with task locked, not thread
 */

static void
task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token)
{
	/*
	 * Step 1:
	 *  Gather requested policy
	 */

	struct task_requested_policy requested = task->requested_policy;

	/*
	 * Step 2:
	 *  Calculate new effective policies from requested policy and task state
	 *  Rules:
	 *      Don't change requested, it won't take effect
	 */

	struct task_effective_policy next = {};

	/* Update task role */
	next.tep_role = requested.trp_role;

	/* Set task qos clamp and ceiling */
	next.tep_qos_clamp = requested.trp_qos_clamp;

	if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT) {
		switch (next.tep_role) {
		case TASK_FOREGROUND_APPLICATION:
			/* Foreground apps get urgent scheduler priority */
			next.tep_qos_ui_is_urgent = 1;
			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
			break;

		case TASK_BACKGROUND_APPLICATION:
			/* This is really 'non-focal but on-screen' */
			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
			break;

		case TASK_DEFAULT_APPLICATION:
			/* This is 'may render UI but we don't know if it's focal/nonfocal' */
			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
			break;

		case TASK_NONUI_APPLICATION:
			/* i.e. 'off-screen' */
			next.tep_qos_ceiling = THREAD_QOS_LEGACY;
			break;

		case TASK_CONTROL_APPLICATION:
		case TASK_GRAPHICS_SERVER:
			next.tep_qos_ui_is_urgent = 1;
			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
			break;

		case TASK_THROTTLE_APPLICATION:
			/* i.e. 'TAL launch' */
			next.tep_qos_ceiling = THREAD_QOS_UTILITY;
			break;

		case TASK_DARWINBG_APPLICATION:
			/* i.e. 'DARWIN_BG throttled background application' */
			next.tep_qos_ceiling = THREAD_QOS_BACKGROUND;
			break;

		case TASK_UNSPECIFIED:
		default:
			/* Apps that don't have an application role get
			 * USER_INTERACTIVE and USER_INITIATED squashed to LEGACY */
			next.tep_qos_ceiling = THREAD_QOS_LEGACY;
			break;
		}
	} else {
		/* Daemons and dexts get USER_INTERACTIVE squashed to USER_INITIATED */
		next.tep_qos_ceiling = THREAD_QOS_USER_INITIATED;
	}

	/* Calculate DARWIN_BG */
	bool wants_darwinbg        = false;
	bool wants_all_sockets_bg  = false; /* Do I want my existing sockets to be bg */
	bool wants_watchersbg      = false; /* Do I want my pidbound threads to be bg */
	bool adaptive_bg_only      = false; /* This task is BG only because it's adaptive unboosted */

	/* Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled. */
	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
	    requested.trp_boosted == 0) {
		wants_darwinbg = true;
		adaptive_bg_only = true;
	}

	/*
	 * If DARWIN_BG has been requested at either level, it's engaged.
	 * Only true DARWIN_BG changes cause watchers to transition.
	 *
	 * Backgrounding due to apptype does.
	 */
	if (requested.trp_int_darwinbg || requested.trp_ext_darwinbg ||
	    next.tep_role == TASK_DARWINBG_APPLICATION) {
		wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = true;
		adaptive_bg_only = false;
	}

	/* Application launching in special Transparent App Lifecycle throttle mode */
	if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT &&
	    requested.trp_role == TASK_THROTTLE_APPLICATION) {
		next.tep_tal_engaged = 1;
	}

	/* Background daemons are always DARWIN_BG, no exceptions, and don't get network throttled. */
	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) {
		wants_darwinbg = true;
		adaptive_bg_only = false;
	}

	if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND ||
	    next.tep_qos_clamp == THREAD_QOS_MAINTENANCE) {
		wants_darwinbg = true;
		adaptive_bg_only = false;
	}

	/* Calculate side effects of DARWIN_BG */

	if (wants_darwinbg) {
		next.tep_darwinbg = 1;
		/* darwinbg tasks always create bg sockets, but we don't always loop over all sockets */
		next.tep_new_sockets_bg = 1;
		next.tep_lowpri_cpu = 1;
	}

	if (wants_all_sockets_bg) {
		next.tep_all_sockets_bg = 1;
	}

	if (wants_watchersbg) {
		next.tep_watchers_bg = 1;
	}

	next.tep_adaptive_bg = adaptive_bg_only;

	/* Calculate low CPU priority */

	boolean_t wants_lowpri_cpu = false;

	if (wants_darwinbg) {
		wants_lowpri_cpu = true;
	}

	if (next.tep_tal_engaged) {
		wants_lowpri_cpu = true;
	}

	if (requested.trp_sup_lowpri_cpu && requested.trp_boosted == 0) {
		wants_lowpri_cpu = true;
	}

	if (wants_lowpri_cpu) {
		next.tep_lowpri_cpu = 1;
	}

	/* Calculate IO policy */

	/* Update BG IO policy (so we can see if it has changed) */
	next.tep_bg_iotier = requested.trp_bg_iotier;

	int iopol = THROTTLE_LEVEL_TIER0;

	if (wants_darwinbg) {
		iopol = MAX(iopol, requested.trp_bg_iotier);
	}

	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_STANDARD) {
		iopol = MAX(iopol, proc_standard_daemon_tier);
	}

	if (requested.trp_sup_disk && requested.trp_boosted == 0) {
		iopol = MAX(iopol, proc_suppressed_disk_tier);
	}

	if (next.tep_tal_engaged) {
		iopol = MAX(iopol, proc_tal_disk_tier);
	}

	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
		iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.tep_qos_clamp]);
	}

	iopol = MAX(iopol, requested.trp_int_iotier);
	iopol = MAX(iopol, requested.trp_ext_iotier);

	next.tep_io_tier = iopol;

	/* Calculate Passive IO policy */

	if (requested.trp_ext_iopassive || requested.trp_int_iopassive) {
		next.tep_io_passive = 1;
	}

	/* Calculate suppression-active flag */
	boolean_t appnap_transition = false;

	if (requested.trp_sup_active && requested.trp_boosted == 0) {
		next.tep_sup_active = 1;
	}

	if (task->effective_policy.tep_sup_active != next.tep_sup_active) {
		appnap_transition = true;
	}

	/* Calculate timer QOS */
	int latency_qos = requested.trp_base_latency_qos;

	if (requested.trp_sup_timer && requested.trp_boosted == 0) {
		latency_qos = requested.trp_sup_timer;
	}

	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
		latency_qos = MAX(latency_qos, (int)thread_qos_policy_params.qos_latency_qos[next.tep_qos_clamp]);
	}

	if (requested.trp_over_latency_qos != 0) {
		latency_qos = requested.trp_over_latency_qos;
	}

	/* Treat the windowserver specially */
	if (requested.trp_role == TASK_GRAPHICS_SERVER) {
		latency_qos = proc_graphics_timer_qos;
	}

	next.tep_latency_qos = latency_qos;

	/* Calculate throughput QOS */
	int through_qos = requested.trp_base_through_qos;

	if (requested.trp_sup_throughput && requested.trp_boosted == 0) {
		through_qos = requested.trp_sup_throughput;
	}

	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
		through_qos = MAX(through_qos, (int)thread_qos_policy_params.qos_through_qos[next.tep_qos_clamp]);
	}

	if (requested.trp_over_through_qos != 0) {
		through_qos = requested.trp_over_through_qos;
	}

	next.tep_through_qos = through_qos;

	/* Calculate suppressed CPU priority */
	if (requested.trp_sup_cpu && requested.trp_boosted == 0) {
		next.tep_suppressed_cpu = 1;
	}

	/*
	 * Calculate background sockets
	 * Don't take into account boosting to limit transition frequency.
	 */
	if (requested.trp_sup_bg_sockets) {
		next.tep_all_sockets_bg = 1;
		next.tep_new_sockets_bg = 1;
	}

	/* Apply SFI Managed class bit */
	next.tep_sfi_managed = requested.trp_sfi_managed;

	/* Calculate 'live donor' status for live importance */
	switch (requested.trp_apptype) {
	case TASK_APPTYPE_APP_TAL:
	case TASK_APPTYPE_APP_DEFAULT:
		if (requested.trp_ext_darwinbg == 1 ||
		    (next.tep_sup_active == 1 &&
		    (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_NONDONOR)) ||
		    next.tep_role == TASK_DARWINBG_APPLICATION) {
			next.tep_live_donor = 0;
		} else {
			next.tep_live_donor = 1;
		}
		break;

	case TASK_APPTYPE_DAEMON_INTERACTIVE:
	case TASK_APPTYPE_DAEMON_STANDARD:
	case TASK_APPTYPE_DAEMON_ADAPTIVE:
	case TASK_APPTYPE_DAEMON_BACKGROUND:
	case TASK_APPTYPE_DRIVER:
	default:
		next.tep_live_donor = 0;
		break;
	}

	if (requested.trp_terminated) {
		/*
		 * Shoot down the throttles that slow down exit or response to SIGTERM
		 * We don't need to shoot down:
		 * passive        (don't want to cause others to throttle)
		 * all_sockets_bg (don't need to iterate FDs on every exit)
		 * new_sockets_bg (doesn't matter for exiting process)
		 * pidsuspend     (jetsam-ed BG process shouldn't run again)
		 * watchers_bg    (watcher threads don't need to be unthrottled)
		 * latency_qos    (affects userspace timers only)
		 */

		next.tep_terminated     = 1;
		next.tep_darwinbg       = 0;
		next.tep_lowpri_cpu     = 0;
		next.tep_io_tier        = THROTTLE_LEVEL_TIER0;
		next.tep_tal_engaged    = 0;
		next.tep_role           = TASK_UNSPECIFIED;
		next.tep_suppressed_cpu = 0;
	}

	/*
	 * Step 3:
	 *  Swap out old policy for new policy
	 */

	struct task_effective_policy prev = task->effective_policy;

	/* This is the point where the new values become visible to other threads */
	task->effective_policy = next;

	/* Don't do anything further to a half-formed task */
	if (in_create) {
		return;
	}

	if (task == kernel_task) {
		panic("Attempting to set task policy on kernel_task");
	}

	/*
	 * Step 4:
	 *  Pend updates that can't be done while holding the task lock
	 */

	if (prev.tep_all_sockets_bg != next.tep_all_sockets_bg) {
		pend_token->tpt_update_sockets = 1;
	}

	/* Only re-scan the timer list if the qos level is getting less strong */
	if (prev.tep_latency_qos > next.tep_latency_qos) {
		pend_token->tpt_update_timers = 1;
	}

#if CONFIG_TASKWATCH
	if (prev.tep_watchers_bg != next.tep_watchers_bg) {
		pend_token->tpt_update_watchers = 1;
	}
#endif /* CONFIG_TASKWATCH */

	if (prev.tep_live_donor != next.tep_live_donor) {
		pend_token->tpt_update_live_donor = 1;
	}

	/*
	 * Step 5:
	 *  Update other subsystems as necessary if something has changed
	 */

	bool update_threads = false, update_sfi = false;

	/*
	 * Check for the attributes that thread_policy_update_internal_locked() consults,
	 *  and trigger thread policy re-evaluation.
	 */
	if (prev.tep_io_tier != next.tep_io_tier ||
	    prev.tep_bg_iotier != next.tep_bg_iotier ||
	    prev.tep_io_passive != next.tep_io_passive ||
	    prev.tep_darwinbg != next.tep_darwinbg ||
	    prev.tep_qos_clamp != next.tep_qos_clamp ||
	    prev.tep_qos_ceiling != next.tep_qos_ceiling ||
	    prev.tep_qos_ui_is_urgent != next.tep_qos_ui_is_urgent ||
	    prev.tep_latency_qos != next.tep_latency_qos ||
	    prev.tep_through_qos != next.tep_through_qos ||
	    prev.tep_lowpri_cpu != next.tep_lowpri_cpu ||
	    prev.tep_new_sockets_bg != next.tep_new_sockets_bg ||
	    prev.tep_terminated != next.tep_terminated ||
	    prev.tep_adaptive_bg != next.tep_adaptive_bg) {
		update_threads = true;
	}

	/*
	 * Check for the attributes that sfi_thread_classify() consults,
	 *  and trigger SFI re-evaluation.
	 */
	if (prev.tep_latency_qos != next.tep_latency_qos ||
	    prev.tep_role != next.tep_role ||
	    prev.tep_sfi_managed != next.tep_sfi_managed) {
		update_sfi = true;
	}

	/* Reflect task role transitions into the coalition role counters */
	if (prev.tep_role != next.tep_role) {
		if (task_policy_update_coalition_focal_tasks(task, prev.tep_role, next.tep_role, pend_token)) {
			update_sfi = true;
		}
	}

	bool update_priority = false;

	int16_t priority     = BASEPRI_DEFAULT;
	int16_t max_priority = MAXPRI_USER;

	if (next.tep_lowpri_cpu) {
		priority = MAXPRI_THROTTLE;
		max_priority = MAXPRI_THROTTLE;
	} else if (next.tep_suppressed_cpu) {
		priority = MAXPRI_SUPPRESSED;
		max_priority = MAXPRI_SUPPRESSED;
	} else {
		switch (next.tep_role) {
		case TASK_CONTROL_APPLICATION:
			priority = BASEPRI_CONTROL;
			break;
		case TASK_GRAPHICS_SERVER:
			priority = BASEPRI_GRAPHICS;
			max_priority = MAXPRI_RESERVED;
			break;
		default:
			break;
		}

		/* factor in 'nice' value */
		priority += task->importance;

		if (task->effective_policy.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
			int16_t qos_clamp_priority = thread_qos_policy_params.qos_pri[task->effective_policy.tep_qos_clamp];

			priority        = MIN(priority, qos_clamp_priority);
			max_priority    = MIN(max_priority, qos_clamp_priority);
		}

		if (priority > max_priority) {
			priority = max_priority;
		} else if (priority < MINPRI) {
			priority = MINPRI;
		}
	}

	assert(priority <= max_priority);

	/* avoid extra work if priority isn't changing */
	if (priority != task->priority ||
	    max_priority != task->max_priority) {
		/* update the scheduling priority for the task */
		task->max_priority  = max_priority;
		task->priority      = priority;
		update_priority     = true;
	}

	/*
	 * Loop over the threads in the task:
	 *   only once
	 *   only if necessary
	 *   with one thread mutex hold per thread
	 */
	if (update_threads || update_priority || update_sfi) {
		thread_t thread;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			struct task_pend_token thread_pend_token = {};

			if (update_sfi) {
				thread_pend_token.tpt_update_thread_sfi = 1;
			}

			if (update_priority || update_threads) {
				thread_policy_update_tasklocked(thread,
				    task->priority, task->max_priority,
				    &thread_pend_token);
			}

			assert(!thread_pend_token.tpt_update_sockets);

			// Slightly risky, as we still hold the task lock...
			thread_policy_update_complete_unlocked(thread, &thread_pend_token);
		}
	}

	/*
	 * Use the app-nap transitions to influence the
	 * transition of the process within the jetsam band
	 * [and optionally its live-donor status]
	 * On macOS only.
	 */
	if (appnap_transition) {
		if (task->effective_policy.tep_sup_active == 1) {
			memorystatus_update_priority_for_appnap(((proc_t) task->bsd_info), TRUE);
		} else {
			memorystatus_update_priority_for_appnap(((proc_t) task->bsd_info), FALSE);
		}
	}
}
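
/*
 * Worked example of the priority math above (illustrative; the constants
 * are assumed from the scheduler headers, e.g. BASEPRI_DEFAULT = 31 and
 * MAXPRI_USER = 63): a task with a nice-derived importance of -5 starts at
 * 31 + (-5) = 26 with a ceiling of 63; if a UTILITY qos clamp is active,
 * both priority and max_priority are then clamped to
 * qos_pri[THREAD_QOS_UTILITY] before the final max_priority/MINPRI
 * bounds check.
 */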


/*
 * Yet another layering violation. We reach out and bang on the coalition directly.
 */
static boolean_t
task_policy_update_coalition_focal_tasks(task_t            task,
    int               prev_role,
    int               next_role,
    task_pend_token_t pend_token)
{
	boolean_t sfi_transition = FALSE;
	uint32_t new_count = 0;

	/* task moving into/out-of the foreground */
	if (prev_role != TASK_FOREGROUND_APPLICATION && next_role == TASK_FOREGROUND_APPLICATION) {
		if (task_coalition_adjust_focal_count(task, 1, &new_count) && (new_count == 1)) {
			sfi_transition = TRUE;
			pend_token->tpt_update_tg_ui_flag = TRUE;
		}
	} else if (prev_role == TASK_FOREGROUND_APPLICATION && next_role != TASK_FOREGROUND_APPLICATION) {
		if (task_coalition_adjust_focal_count(task, -1, &new_count) && (new_count == 0)) {
			sfi_transition = TRUE;
			pend_token->tpt_update_tg_ui_flag = TRUE;
		}
	}

	/* task moving into/out-of background */
	if (prev_role != TASK_BACKGROUND_APPLICATION && next_role == TASK_BACKGROUND_APPLICATION) {
		if (task_coalition_adjust_nonfocal_count(task, 1, &new_count) && (new_count == 1)) {
			sfi_transition = TRUE;
		}
	} else if (prev_role == TASK_BACKGROUND_APPLICATION && next_role != TASK_BACKGROUND_APPLICATION) {
		if (task_coalition_adjust_nonfocal_count(task, -1, &new_count) && (new_count == 0)) {
			sfi_transition = TRUE;
		}
	}

	if (sfi_transition) {
		pend_token->tpt_update_coal_sfi = 1;
	}
	return sfi_transition;
}

#if CONFIG_SCHED_SFI

/* coalition object is locked */
static void
task_sfi_reevaluate_cb(coalition_t coal, void *ctx, task_t task)
{
	thread_t thread;

	/* unused for now */
	(void)coal;

	/* skip the task we're re-evaluating on behalf of: it's already updated */
	if (task == (task_t)ctx) {
		return;
	}

	task_lock(task);

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		sfi_reevaluate(thread);
	}

	task_unlock(task);
}
#endif /* CONFIG_SCHED_SFI */

/*
 * Called with task unlocked to do things that can't be done while holding the task lock
 */
void
task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token)
{
#ifdef MACH_BSD
	if (pend_token->tpt_update_sockets) {
		proc_apply_task_networkbg(task->bsd_info, THREAD_NULL);
	}
#endif /* MACH_BSD */

	/* The timer throttle has been removed or reduced, we need to look for expired timers and fire them */
	if (pend_token->tpt_update_timers) {
		ml_timer_evaluate();
	}

#if CONFIG_TASKWATCH
	if (pend_token->tpt_update_watchers) {
		apply_appstate_watchers(task);
	}
#endif /* CONFIG_TASKWATCH */

	if (pend_token->tpt_update_live_donor) {
		task_importance_update_live_donor(task);
	}

#if CONFIG_SCHED_SFI
	/* use the resource coalition for SFI re-evaluation */
	if (pend_token->tpt_update_coal_sfi) {
		coalition_for_each_task(task->coalition[COALITION_TYPE_RESOURCE],
		    (void *)task, task_sfi_reevaluate_cb);
	}
#endif /* CONFIG_SCHED_SFI */

#if CONFIG_THREAD_GROUPS
	if (pend_token->tpt_update_tg_ui_flag) {
		task_coalition_thread_group_focal_update(task);
	}
#endif /* CONFIG_THREAD_GROUPS */
}
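
/*
 * Illustrative sketch (not compiled, not part of the original file): per
 * the locking rules in the header comment, consumers read the distilled
 * decisions lock-free through the effective-policy accessors rather than
 * peeking at requested state.
 */
#if 0
static void
example_consume_effective_policy(task_t task)
{
	/* e.g. a subsystem asks whether the task is effectively backgrounded */
	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) {
		/* treat new work from this task as background */
	}
}
#endif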

/*
 * Initiate a task policy state transition
 *
 * Everything that modifies requested except functions that need to hold the task lock
 * should use this function
 *
 * Argument validation should be performed before reaching this point.
 *
 * TODO: Do we need to check task->active?
 */
void
proc_set_task_policy(task_t     task,
    int        category,
    int        flavor,
    int        value)
{
	struct task_pend_token pend_token = {};

	task_lock(task);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
	    task_pid(task), trequested_0(task),
	    trequested_1(task), value, 0);

	proc_set_task_policy_locked(task, category, flavor, value, 0);

	task_policy_update_locked(task, &pend_token);


	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
	    task_pid(task), trequested_0(task),
	    trequested_1(task), tpending(&pend_token), 0);

	task_unlock(task);

	task_policy_update_complete_unlocked(task, &pend_token);
}
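
/*
 * Illustrative sketch (not compiled, not part of the original file): a
 * kernel-internal caller marking a task as terminated via the setter
 * above, so the next update pass strips the throttles (see the
 * requested.trp_terminated handling earlier in this file).
 */
#if 0
static void
example_mark_task_terminated(task_t task)
{
	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_TERMINATED, 1);
}
#endif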
1435  
1436  /*
1437   * Variant of proc_set_task_policy() that sets two scalars in the requested policy structure.
1438   * Same locking rules apply.
1439   */
1440  void
1441  proc_set_task_policy2(task_t    task,
1442      int       category,
1443      int       flavor,
1444      int       value,
1445      int       value2)
1446  {
1447  	struct task_pend_token pend_token = {};
1448  
1449  	task_lock(task);
1450  
1451  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1452  	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
1453  	    task_pid(task), trequested_0(task),
1454  	    trequested_1(task), value, 0);
1455  
1456  	proc_set_task_policy_locked(task, category, flavor, value, value2);
1457  
1458  	task_policy_update_locked(task, &pend_token);
1459  
1460  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1461  	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
1462  	    task_pid(task), trequested_0(task),
1463  	    trequested_1(task), tpending(&pend_token), 0);
1464  
1465  	task_unlock(task);
1466  
1467  	task_policy_update_complete_unlocked(task, &pend_token);
1468  }
1469  
1470  /*
1471   * Set the requested state for a specific flavor to a specific value.
1472   *
1473   *  TODO:
1474   *  Verify that arguments to non iopol things are 1 or 0
1475   */
1476  static void
1477  proc_set_task_policy_locked(task_t      task,
1478      int         category,
1479      int         flavor,
1480      int         value,
1481      int         value2)
1482  {
1483  	int tier, passive;
1484  
1485  	struct task_requested_policy requested = task->requested_policy;
1486  
1487  	switch (flavor) {
1488  	/* Category: EXTERNAL and INTERNAL */
1489  
1490  	case TASK_POLICY_DARWIN_BG:
1491  		if (category == TASK_POLICY_EXTERNAL) {
1492  			requested.trp_ext_darwinbg = value;
1493  		} else {
1494  			requested.trp_int_darwinbg = value;
1495  		}
1496  		break;
1497  
1498  	case TASK_POLICY_IOPOL:
1499  		proc_iopol_to_tier(value, &tier, &passive);
1500  		if (category == TASK_POLICY_EXTERNAL) {
1501  			requested.trp_ext_iotier  = tier;
1502  			requested.trp_ext_iopassive = passive;
1503  		} else {
1504  			requested.trp_int_iotier  = tier;
1505  			requested.trp_int_iopassive = passive;
1506  		}
1507  		break;
1508  
1509  	case TASK_POLICY_IO:
1510  		if (category == TASK_POLICY_EXTERNAL) {
1511  			requested.trp_ext_iotier = value;
1512  		} else {
1513  			requested.trp_int_iotier = value;
1514  		}
1515  		break;
1516  
1517  	case TASK_POLICY_PASSIVE_IO:
1518  		if (category == TASK_POLICY_EXTERNAL) {
1519  			requested.trp_ext_iopassive = value;
1520  		} else {
1521  			requested.trp_int_iopassive = value;
1522  		}
1523  		break;
1524  
1525  	/* Category: INTERNAL */
1526  
1527  	case TASK_POLICY_DARWIN_BG_IOPOL:
1528  		assert(category == TASK_POLICY_INTERNAL);
1529  		proc_iopol_to_tier(value, &tier, &passive);
1530  		requested.trp_bg_iotier = tier;
1531  		break;
1532  
1533  	/* Category: ATTRIBUTE */
1534  
1535  	case TASK_POLICY_BOOST:
1536  		assert(category == TASK_POLICY_ATTRIBUTE);
1537  		requested.trp_boosted = value;
1538  		break;
1539  
1540  	case TASK_POLICY_ROLE:
1541  		assert(category == TASK_POLICY_ATTRIBUTE);
1542  		requested.trp_role = value;
1543  		break;
1544  
1545  	case TASK_POLICY_TERMINATED:
1546  		assert(category == TASK_POLICY_ATTRIBUTE);
1547  		requested.trp_terminated = value;
1548  		break;
1549  
1550  	case TASK_BASE_LATENCY_QOS_POLICY:
1551  		assert(category == TASK_POLICY_ATTRIBUTE);
1552  		requested.trp_base_latency_qos = value;
1553  		break;
1554  
1555  	case TASK_BASE_THROUGHPUT_QOS_POLICY:
1556  		assert(category == TASK_POLICY_ATTRIBUTE);
1557  		requested.trp_base_through_qos = value;
1558  		break;
1559  
1560  	case TASK_POLICY_SFI_MANAGED:
1561  		assert(category == TASK_POLICY_ATTRIBUTE);
1562  		requested.trp_sfi_managed = value;
1563  		break;
1564  
1565  	case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
1566  		assert(category == TASK_POLICY_ATTRIBUTE);
1567  		requested.trp_base_latency_qos = value;
1568  		requested.trp_base_through_qos = value2;
1569  		break;
1570  
1571  	case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
1572  		assert(category == TASK_POLICY_ATTRIBUTE);
1573  		requested.trp_over_latency_qos = value;
1574  		requested.trp_over_through_qos = value2;
1575  		break;
1576  
1577  	default:
1578  		panic("unknown task policy: %d %d %d %d", category, flavor, value, value2);
1579  		break;
1580  	}
1581  
1582  	task->requested_policy = requested;
1583  }
1584  
1585  /*
1586   * Gets what you set. Effective values may be different.
1587   */
1588  int
1589  proc_get_task_policy(task_t     task,
1590      int        category,
1591      int        flavor)
1592  {
1593  	int value = 0;
1594  
1595  	task_lock(task);
1596  
1597  	struct task_requested_policy requested = task->requested_policy;
1598  
1599  	switch (flavor) {
1600  	case TASK_POLICY_DARWIN_BG:
1601  		if (category == TASK_POLICY_EXTERNAL) {
1602  			value = requested.trp_ext_darwinbg;
1603  		} else {
1604  			value = requested.trp_int_darwinbg;
1605  		}
1606  		break;
1607  	case TASK_POLICY_IOPOL:
1608  		if (category == TASK_POLICY_EXTERNAL) {
1609  			value = proc_tier_to_iopol(requested.trp_ext_iotier,
1610  			    requested.trp_ext_iopassive);
1611  		} else {
1612  			value = proc_tier_to_iopol(requested.trp_int_iotier,
1613  			    requested.trp_int_iopassive);
1614  		}
1615  		break;
1616  	case TASK_POLICY_IO:
1617  		if (category == TASK_POLICY_EXTERNAL) {
1618  			value = requested.trp_ext_iotier;
1619  		} else {
1620  			value = requested.trp_int_iotier;
1621  		}
1622  		break;
1623  	case TASK_POLICY_PASSIVE_IO:
1624  		if (category == TASK_POLICY_EXTERNAL) {
1625  			value = requested.trp_ext_iopassive;
1626  		} else {
1627  			value = requested.trp_int_iopassive;
1628  		}
1629  		break;
1630  	case TASK_POLICY_DARWIN_BG_IOPOL:
1631  		assert(category == TASK_POLICY_INTERNAL);
1632  		value = proc_tier_to_iopol(requested.trp_bg_iotier, 0);
1633  		break;
1634  	case TASK_POLICY_ROLE:
1635  		assert(category == TASK_POLICY_ATTRIBUTE);
1636  		value = requested.trp_role;
1637  		break;
1638  	case TASK_POLICY_SFI_MANAGED:
1639  		assert(category == TASK_POLICY_ATTRIBUTE);
1640  		value = requested.trp_sfi_managed;
1641  		break;
1642  	default:
1643  		panic("unknown policy_flavor %d", flavor);
1644  		break;
1645  	}
1646  
1647  	task_unlock(task);
1648  
1649  	return value;
1650  }
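      /*
       * Illustrative sketch (not in the original file): a set/get round trip is
       * stable for the requested value even when the effective value differs,
       * e.g. while the task is externally backgrounded.  example_round_trip() is
       * a hypothetical helper.
       */
      #if 0
      static void
      example_round_trip(task_t task)
      {
      	proc_set_task_policy(task, TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL, IOPOL_UTILITY);
      	assert(proc_get_task_policy(task, TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL) == IOPOL_UTILITY);
      }
      #endif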
1651  
1652  /*
1653   * Variant of proc_get_task_policy() that returns two scalar outputs.
1654   */
1655  void
1656  proc_get_task_policy2(task_t task,
1657      __assert_only int category,
1658      int flavor,
1659      int *value1,
1660      int *value2)
1661  {
1662  	task_lock(task);
1663  
1664  	struct task_requested_policy requested = task->requested_policy;
1665  
1666  	switch (flavor) {
1667  	case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
1668  		assert(category == TASK_POLICY_ATTRIBUTE);
1669  		*value1 = requested.trp_base_latency_qos;
1670  		*value2 = requested.trp_base_through_qos;
1671  		break;
1672  
1673  	case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
1674  		assert(category == TASK_POLICY_ATTRIBUTE);
1675  		*value1 = requested.trp_over_latency_qos;
1676  		*value2 = requested.trp_over_through_qos;
1677  		break;
1678  
1679  	default:
1680  		panic("unknown policy_flavor %d", flavor);
1681  		break;
1682  	}
1683  
1684  	task_unlock(task);
1685  }
1686  
1687  /*
1688   * Function for querying effective state for relevant subsystems.
1689   * Gets what is actually in effect, for subsystems that pull policy instead of receiving updates.
1690   *
1691   * ONLY the relevant subsystem should query this.
1692   * NEVER take a value from the 'effective' function and stuff it into a setter.
1693   *
1694   * NOTE: This accessor does not take the task lock.
1695   * Notifications of state updates need to be externally synchronized with state queries.
1696   * This routine *MUST* remain interrupt safe, as it is potentially invoked
1697   * within the context of a timer interrupt.  It is also called in KDP context for stackshot.
1698   */
1699  int
1700  proc_get_effective_task_policy(task_t   task,
1701      int      flavor)
1702  {
1703  	int value = 0;
1704  
1705  	switch (flavor) {
1706  	case TASK_POLICY_DARWIN_BG:
1707  		/*
1708  		 * This backs the KPI call proc_pidbackgrounded to find
1709  		 * out if a pid is backgrounded.
1710  		 * It is used to communicate state to the VM system, as well as
1711  		 * to prioritize requests to the graphics system.
1712  		 * Returns 1 for background mode, 0 for normal mode
1713  		 */
1714  		value = task->effective_policy.tep_darwinbg;
1715  		break;
1716  	case TASK_POLICY_ALL_SOCKETS_BG:
1717  		/*
1718  		 * do_background_socket() calls this to determine what it should do to the proc's sockets
1719  		 * Returns 1 for background mode, 0 for normal mode
1720  		 *
1721  		 * This consults both thread and task so un-DBGing a thread while the task is BG
1722  		 * doesn't get you out of the network throttle.
1723  		 */
1724  		value = task->effective_policy.tep_all_sockets_bg;
1725  		break;
1726  	case TASK_POLICY_SUP_ACTIVE:
1727  		/*
1728  		 * Is the task in AppNap? This is used to determine the urgency
1729  		 * that's passed to the performance management subsystem for threads
1730  		 * that are running at a priority <= MAXPRI_THROTTLE.
1731  		 */
1732  		value = task->effective_policy.tep_sup_active;
1733  		break;
1734  	case TASK_POLICY_LATENCY_QOS:
1735  		/*
1736  		 * Timer arming calls into here to find out the timer coalescing level.
1737  		 * Returns a QoS tier (0-6)
1738  		 */
1739  		value = task->effective_policy.tep_latency_qos;
1740  		break;
1741  	case TASK_POLICY_THROUGH_QOS:
1742  		/*
1743  		 * This value is passed into the urgency callout from the scheduler
1744  		 * to the performance management subsystem.
1745  		 * Returns a QoS tier (0-6)
1746  		 */
1747  		value = task->effective_policy.tep_through_qos;
1748  		break;
1749  	case TASK_POLICY_ROLE:
1750  		/*
1751  		 * This controls various things that ask whether a process is foreground,
1752  		 * like SFI, VM, access to GPU, etc
1753  		 */
1754  		value = task->effective_policy.tep_role;
1755  		break;
1756  	case TASK_POLICY_WATCHERS_BG:
1757  		/*
1758  		 * This controls whether or not a thread watching this process should be BG.
1759  		 */
1760  		value = task->effective_policy.tep_watchers_bg;
1761  		break;
1762  	case TASK_POLICY_SFI_MANAGED:
1763  		/*
1764  		 * This controls whether or not a process is targeted for specific control by thermald.
1765  		 */
1766  		value = task->effective_policy.tep_sfi_managed;
1767  		break;
1768  	default:
1769  		panic("unknown policy_flavor %d", flavor);
1770  		break;
1771  	}
1772  
1773  	return value;
1774  }
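      /*
       * Illustrative sketch (not in the original file): a pulling subsystem such
       * as timer coalescing reads the effective value with no lock held, which is
       * why the accessor above must stay interrupt safe.  The helper below is
       * hypothetical.
       */
      #if 0
      static int
      example_timer_coalescing_tier(task_t task)
      {
      	/* safe from interrupt/KDP context: reads a single effective field */
      	return proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS);
      }
      #endif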
1775  
1776  /*
1777   * Convert from IOPOL_* values to throttle tiers.
1778   *
1779   * TODO: Can this be made more compact, like an array lookup? (A sketch follows proc_tier_to_iopol() below.)
1780   * Note that it is possible to support e.g. IOPOL_PASSIVE_STANDARD in the future
1781   */
1782  
1783  void
1784  proc_iopol_to_tier(int iopolicy, int *tier, int *passive)
1785  {
1786  	*passive = 0;
1787  	*tier = 0;
1788  	switch (iopolicy) {
1789  	case IOPOL_IMPORTANT:
1790  		*tier = THROTTLE_LEVEL_TIER0;
1791  		break;
1792  	case IOPOL_PASSIVE:
1793  		*tier = THROTTLE_LEVEL_TIER0;
1794  		*passive = 1;
1795  		break;
1796  	case IOPOL_STANDARD:
1797  		*tier = THROTTLE_LEVEL_TIER1;
1798  		break;
1799  	case IOPOL_UTILITY:
1800  		*tier = THROTTLE_LEVEL_TIER2;
1801  		break;
1802  	case IOPOL_THROTTLE:
1803  		*tier = THROTTLE_LEVEL_TIER3;
1804  		break;
1805  	default:
1806  		panic("unknown I/O policy %d", iopolicy);
1807  		break;
1808  	}
1809  }
1810  
1811  int
1812  proc_tier_to_iopol(int tier, int passive)
1813  {
1814  	if (passive == 1) {
1815  		switch (tier) {
1816  		case THROTTLE_LEVEL_TIER0:
1817  			return IOPOL_PASSIVE;
1818  		default:
1819  			panic("unknown passive tier %d", tier);
1820  			return IOPOL_DEFAULT;
1821  		}
1822  	} else {
1823  		switch (tier) {
1824  		case THROTTLE_LEVEL_NONE:
1825  		case THROTTLE_LEVEL_TIER0:
1826  			return IOPOL_DEFAULT;
1827  		case THROTTLE_LEVEL_TIER1:
1828  			return IOPOL_STANDARD;
1829  		case THROTTLE_LEVEL_TIER2:
1830  			return IOPOL_UTILITY;
1831  		case THROTTLE_LEVEL_TIER3:
1832  			return IOPOL_THROTTLE;
1833  		default:
1834  			panic("unknown tier %d", tier);
1835  			return IOPOL_DEFAULT;
1836  		}
1837  	}
1838  }
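      /*
       * Sketch of the table-driven alternative the TODO above asks about
       * (illustrative only, not part of the original file).  Assumes the IOPOL_*
       * constants remain small and dense enough to use as array indexes.
       */
      #if 0
      static const struct {
      	int tier;
      	int passive;
      } iopol_map[] = {
      	[IOPOL_IMPORTANT] = { THROTTLE_LEVEL_TIER0, 0 },
      	[IOPOL_PASSIVE]   = { THROTTLE_LEVEL_TIER0, 1 },
      	[IOPOL_STANDARD]  = { THROTTLE_LEVEL_TIER1, 0 },
      	[IOPOL_UTILITY]   = { THROTTLE_LEVEL_TIER2, 0 },
      	[IOPOL_THROTTLE]  = { THROTTLE_LEVEL_TIER3, 0 },
      };
      #endif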
1839  
1840  int
1841  proc_darwin_role_to_task_role(int darwin_role, task_role_t* task_role)
1842  {
1843  	integer_t role = TASK_UNSPECIFIED;
1844  
1845  	switch (darwin_role) {
1846  	case PRIO_DARWIN_ROLE_DEFAULT:
1847  		role = TASK_UNSPECIFIED;
1848  		break;
1849  	case PRIO_DARWIN_ROLE_UI_FOCAL:
1850  		role = TASK_FOREGROUND_APPLICATION;
1851  		break;
1852  	case PRIO_DARWIN_ROLE_UI:
1853  		role = TASK_DEFAULT_APPLICATION;
1854  		break;
1855  	case PRIO_DARWIN_ROLE_NON_UI:
1856  		role = TASK_NONUI_APPLICATION;
1857  		break;
1858  	case PRIO_DARWIN_ROLE_UI_NON_FOCAL:
1859  		role = TASK_BACKGROUND_APPLICATION;
1860  		break;
1861  	case PRIO_DARWIN_ROLE_TAL_LAUNCH:
1862  		role = TASK_THROTTLE_APPLICATION;
1863  		break;
1864  	case PRIO_DARWIN_ROLE_DARWIN_BG:
1865  		role = TASK_DARWINBG_APPLICATION;
1866  		break;
1867  	default:
1868  		return EINVAL;
1869  	}
1870  
1871  	*task_role = role;
1872  
1873  	return 0;
1874  }
1875  
1876  int
1877  proc_task_role_to_darwin_role(task_role_t task_role)
1878  {
1879  	switch (task_role) {
1880  	case TASK_FOREGROUND_APPLICATION:
1881  		return PRIO_DARWIN_ROLE_UI_FOCAL;
1882  	case TASK_BACKGROUND_APPLICATION:
1883  		return PRIO_DARWIN_ROLE_UI_NON_FOCAL;
1884  	case TASK_NONUI_APPLICATION:
1885  		return PRIO_DARWIN_ROLE_NON_UI;
1886  	case TASK_DEFAULT_APPLICATION:
1887  		return PRIO_DARWIN_ROLE_UI;
1888  	case TASK_THROTTLE_APPLICATION:
1889  		return PRIO_DARWIN_ROLE_TAL_LAUNCH;
1890  	case TASK_DARWINBG_APPLICATION:
1891  		return PRIO_DARWIN_ROLE_DARWIN_BG;
1892  	case TASK_UNSPECIFIED:
1893  	default:
1894  		return PRIO_DARWIN_ROLE_DEFAULT;
1895  	}
1896  }
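      /*
       * Usage note (not in the original file): these two maps back the
       * PRIO_DARWIN_ROLE flavor of setpriority(2)/getpriority(2) in the BSD
       * layer, roughly:
       *
       *     setpriority(PRIO_DARWIN_ROLE, pid, PRIO_DARWIN_ROLE_UI_FOCAL);
       *     role = getpriority(PRIO_DARWIN_ROLE, pid);  // PRIO_DARWIN_ROLE_UI_FOCAL
       *
       * The maps are inverses for every listed role, so a set/get round trip
       * through the kernel is stable.
       */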
1897  
1898  
1899  /* TODO: remove this variable when the interactive daemon audit period is over */
1900  static TUNABLE(bool, ipc_importance_interactive_receiver,
1901      "imp_interactive_receiver", false);
1902  
1903  /*
1904   * Called at process exec to initialize the apptype, qos clamp, and qos seed of a process
1905   *
1906   * TODO: Make this function more table-driven instead of ad-hoc
1907   */
1908  void
1909  proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_clamp, task_role_t role,
1910      ipc_port_t * portwatch_ports, uint32_t portwatch_count)
1911  {
1912  	struct task_pend_token pend_token = {};
1913  
1914  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1915  	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START,
1916  	    task_pid(task), trequested_0(task), trequested_1(task),
1917  	    apptype, 0);
1918  
1919  	switch (apptype) {
1920  	case TASK_APPTYPE_APP_DEFAULT:
1921  		/* Apps become donors via the 'live-donor' flag instead of the static donor flag */
1922  		task_importance_mark_donor(task, FALSE);
1923  		task_importance_mark_live_donor(task, TRUE);
1924  		task_importance_mark_receiver(task, FALSE);
1925  #if !defined(XNU_TARGET_OS_OSX)
1926  		task_importance_mark_denap_receiver(task, FALSE);
1927  #else
1928  		/* Apps are de-nap receivers on macOS for suppression behaviors */
1929  		task_importance_mark_denap_receiver(task, TRUE);
1930  #endif /* !defined(XNU_TARGET_OS_OSX) */
1931  		break;
1932  
1933  	case TASK_APPTYPE_DAEMON_INTERACTIVE:
1934  		task_importance_mark_donor(task, TRUE);
1935  		task_importance_mark_live_donor(task, FALSE);
1936  
1937  		/*
1938  		 * A boot arg controls whether interactive daemons are importance receivers.
1939  		 * Normally, they are not.  But for testing their behavior as an adaptive
1940  		 * daemon, the boot-arg can be set.
1941  		 *
1942  		 * TODO: remove this when the interactive daemon audit period is over.
1943  		 */
1944  		task_importance_mark_receiver(task, /* FALSE */ ipc_importance_interactive_receiver);
1945  		task_importance_mark_denap_receiver(task, FALSE);
1946  		break;
1947  
1948  	case TASK_APPTYPE_DAEMON_STANDARD:
1949  		task_importance_mark_donor(task, TRUE);
1950  		task_importance_mark_live_donor(task, FALSE);
1951  		task_importance_mark_receiver(task, FALSE);
1952  		task_importance_mark_denap_receiver(task, FALSE);
1953  		break;
1954  
1955  	case TASK_APPTYPE_DAEMON_ADAPTIVE:
1956  		task_importance_mark_donor(task, FALSE);
1957  		task_importance_mark_live_donor(task, FALSE);
1958  		task_importance_mark_receiver(task, TRUE);
1959  		task_importance_mark_denap_receiver(task, FALSE);
1960  		break;
1961  
1962  	case TASK_APPTYPE_DAEMON_BACKGROUND:
1963  		task_importance_mark_donor(task, FALSE);
1964  		task_importance_mark_live_donor(task, FALSE);
1965  		task_importance_mark_receiver(task, FALSE);
1966  		task_importance_mark_denap_receiver(task, FALSE);
1967  		break;
1968  
1969  	case TASK_APPTYPE_DRIVER:
1970  		task_importance_mark_donor(task, FALSE);
1971  		task_importance_mark_live_donor(task, FALSE);
1972  		task_importance_mark_receiver(task, FALSE);
1973  		task_importance_mark_denap_receiver(task, FALSE);
1974  		break;
1975  
1976  	case TASK_APPTYPE_NONE:
1977  		break;
1978  	}
1979  
1980  	if (portwatch_ports != NULL && apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
1981  		int portwatch_boosts = 0;
1982  
1983  		for (uint32_t i = 0; i < portwatch_count; i++) {
1984  			ipc_port_t port = NULL;
1985  
1986  			if (IP_VALID(port = portwatch_ports[i])) {
1987  				int boost = 0;
1988  				task_add_importance_watchport(task, port, &boost);
1989  				portwatch_boosts += boost;
1990  			}
1991  		}
1992  
1993  		if (portwatch_boosts > 0) {
1994  			task_importance_hold_internal_assertion(task, portwatch_boosts);
1995  		}
1996  	}
1997  
1998  	/* Redirect the turnstile push of watchports to task */
1999  	if (portwatch_count && portwatch_ports != NULL) {
2000  		task_add_turnstile_watchports(task, thread, portwatch_ports, portwatch_count);
2001  	}
2002  
2003  	task_lock(task);
2004  
2005  	if (apptype != TASK_APPTYPE_NONE) {
2006  		task->requested_policy.trp_apptype = apptype;
2007  	}
2008  
2009  #if !defined(XNU_TARGET_OS_OSX)
2010  	/* Remove this after launchd starts setting it properly */
2011  	if (apptype == TASK_APPTYPE_APP_DEFAULT && role == TASK_UNSPECIFIED) {
2012  		task->requested_policy.trp_role = TASK_FOREGROUND_APPLICATION;
2013  	} else
2014  #endif
2015  	if (role != TASK_UNSPECIFIED) {
2016  		task->requested_policy.trp_role = (uint32_t)role;
2017  	}
2018  
2019  	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
2020  		task->requested_policy.trp_qos_clamp = qos_clamp;
2021  	}
2022  
2023  	task_policy_update_locked(task, &pend_token);
2024  
2025  	task_unlock(task);
2026  
2027  	/* Ensure the donor bit is updated to be in sync with the new live donor status */
2028  	pend_token.tpt_update_live_donor = 1;
2029  
2030  	task_policy_update_complete_unlocked(task, &pend_token);
2031  
2032  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2033  	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END,
2034  	    task_pid(task), trequested_0(task), trequested_1(task),
2035  	    task_is_importance_receiver(task), 0);
2036  }
2037  
2038  /*
2039   * Inherit task role across exec
2040   */
2041  void
2042  proc_inherit_task_role(task_t new_task,
2043      task_t old_task)
2044  {
2045  	int role;
2046  
2047  	/* inherit the role from old task to new task */
2048  	role = proc_get_task_policy(old_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
2049  	proc_set_task_policy(new_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, role);
2050  }
2051  
2052  extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
2053  
2054  /*
2055   * Compute the default main thread qos for a task
2056   */
2057  thread_qos_t
2058  task_compute_main_thread_qos(task_t task)
2059  {
2060  	thread_qos_t primordial_qos = THREAD_QOS_UNSPECIFIED;
2061  
2062  	thread_qos_t qos_clamp = task->requested_policy.trp_qos_clamp;
2063  
2064  	switch (task->requested_policy.trp_apptype) {
2065  	case TASK_APPTYPE_APP_TAL:
2066  	case TASK_APPTYPE_APP_DEFAULT:
2067  		primordial_qos = THREAD_QOS_USER_INTERACTIVE;
2068  		break;
2069  
2070  	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2071  	case TASK_APPTYPE_DAEMON_STANDARD:
2072  	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2073  	case TASK_APPTYPE_DRIVER:
2074  		primordial_qos = THREAD_QOS_LEGACY;
2075  		break;
2076  
2077  	case TASK_APPTYPE_DAEMON_BACKGROUND:
2078  		primordial_qos = THREAD_QOS_BACKGROUND;
2079  		break;
2080  	}
2081  
2082  	if (task->bsd_info == initproc) {
2083  		/* PID 1 gets a special case */
2084  		primordial_qos = MAX(primordial_qos, THREAD_QOS_USER_INITIATED);
2085  	}
2086  
2087  	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
2088  		if (primordial_qos != THREAD_QOS_UNSPECIFIED) {
2089  			primordial_qos = MIN(qos_clamp, primordial_qos);
2090  		} else {
2091  			primordial_qos = qos_clamp;
2092  		}
2093  	}
2094  
2095  	return primordial_qos;
2096  }
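      /*
       * Worked example (illustrative): a TASK_APPTYPE_APP_DEFAULT task starts at
       * THREAD_QOS_USER_INTERACTIVE; a qos clamp of THREAD_QOS_UTILITY then makes
       * the MIN() above yield THREAD_QOS_UTILITY for the main thread.  An
       * unclamped TASK_APPTYPE_DAEMON_BACKGROUND task simply keeps
       * THREAD_QOS_BACKGROUND.
       */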
2097  
2098  
2099  /* for process_policy to check before attempting to set */
2100  boolean_t
2101  proc_task_is_tal(task_t task)
2102  {
2103  	return (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) ? TRUE : FALSE;
2104  }
2105  
2106  int
2107  task_get_apptype(task_t task)
2108  {
2109  	return task->requested_policy.trp_apptype;
2110  }
2111  
2112  boolean_t
2113  task_is_daemon(task_t task)
2114  {
2115  	switch (task->requested_policy.trp_apptype) {
2116  	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2117  	case TASK_APPTYPE_DAEMON_STANDARD:
2118  	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2119  	case TASK_APPTYPE_DAEMON_BACKGROUND:
2120  		return TRUE;
2121  	default:
2122  		return FALSE;
2123  	}
2124  }
2125  
2126  bool
2127  task_is_driver(task_t task)
2128  {
2129  	if (!task) {
2130  		return FALSE;
2131  	}
2132  	return task->requested_policy.trp_apptype == TASK_APPTYPE_DRIVER;
2133  }
2134  
2135  boolean_t
2136  task_is_app(task_t task)
2137  {
2138  	switch (task->requested_policy.trp_apptype) {
2139  	case TASK_APPTYPE_APP_DEFAULT:
2140  	case TASK_APPTYPE_APP_TAL:
2141  		return TRUE;
2142  	default:
2143  		return FALSE;
2144  	}
2145  }
2146  
2147  /* for telemetry */
2148  integer_t
2149  task_grab_latency_qos(task_t task)
2150  {
2151  	return qos_latency_policy_package(proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS));
2152  }
2153  
2154  /* update the darwin background action state in the flags field for libproc */
2155  int
2156  proc_get_darwinbgstate(task_t task, uint32_t * flagsp)
2157  {
2158  	if (task->requested_policy.trp_ext_darwinbg) {
2159  		*flagsp |= PROC_FLAG_EXT_DARWINBG;
2160  	}
2161  
2162  	if (task->requested_policy.trp_int_darwinbg) {
2163  		*flagsp |= PROC_FLAG_DARWINBG;
2164  	}
2165  
2166  #if !defined(XNU_TARGET_OS_OSX)
2167  	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) {
2168  		*flagsp |= PROC_FLAG_IOS_APPLEDAEMON;
2169  	}
2170  
2171  	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2172  		*flagsp |= PROC_FLAG_IOS_IMPPROMOTION;
2173  	}
2174  #endif /* !defined(XNU_TARGET_OS_OSX) */
2175  
2176  	if (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_DEFAULT ||
2177  	    task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) {
2178  		*flagsp |= PROC_FLAG_APPLICATION;
2179  	}
2180  
2181  	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2182  		*flagsp |= PROC_FLAG_ADAPTIVE;
2183  	}
2184  
2185  	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
2186  	    task->requested_policy.trp_boosted == 1) {
2187  		*flagsp |= PROC_FLAG_ADAPTIVE_IMPORTANT;
2188  	}
2189  
2190  	if (task_is_importance_donor(task)) {
2191  		*flagsp |= PROC_FLAG_IMPORTANCE_DONOR;
2192  	}
2193  
2194  	if (task->effective_policy.tep_sup_active) {
2195  		*flagsp |= PROC_FLAG_SUPPRESSED;
2196  	}
2197  
2198  	return 0;
2199  }
2200  
2201  /*
2202   * Tracepoint data... Reading the tracepoint data can be somewhat complicated.
2203   * The current scheme packs as much data into a single tracepoint as it can.
2204   *
2205   * Each task/thread requested/effective structure is 64 bits in size. Any
2206   * given tracepoint will emit either requested or effective data, but not both.
2207   *
2208   * A tracepoint may emit any of task, thread, or task & thread data.
2209   *
2210   * The type of data emitted varies with pointer size. Where possible, both
2211   * task and thread data are emitted. In LP32 systems, the first and second
2212   * halves of either the task or thread data are emitted.
2213   *
2214   * The code uses uintptr_t array indexes instead of high/low to avoid
2215   * confusion WRT big vs little endian.
2216   *
2217   * The truth table for the tracepoint data functions is below, and has the
2218   * following invariants:
2219   *
2220   * 1) task and thread are uintptr_t*
2221   * 2) task may never be NULL
2222   *
2223   *
2224   *                                     LP32            LP64
2225   * trequested_0(task, NULL)            task[0]         task[0]
2226   * trequested_1(task, NULL)            task[1]         NULL
2227   * trequested_0(task, thread)          thread[0]       task[0]
2228   * trequested_1(task, thread)          thread[1]       thread[0]
2229   *
2230   * Basically, you get a full task or thread on LP32, and both on LP64.
2231   *
2232   * The uintptr_t munging here is squicky enough to deserve a comment.
2233   *
2234   * The variables we are accessing are laid out in memory like this:
2235   *
2236   * [            LP64 uintptr_t  0          ]
2237   * [ LP32 uintptr_t 0 ] [ LP32 uintptr_t 1 ]
2238   *
2239   *      1   2   3   4     5   6   7   8
2240   *
2241   */
2242  
2243  static uintptr_t
2244  trequested_0(task_t task)
2245  {
2246  	static_assert(sizeof(struct task_requested_policy) == sizeof(uint64_t), "size invariant violated");
2247  
2248  	uintptr_t* raw = (uintptr_t*)&task->requested_policy;
2249  
2250  	return raw[0];
2251  }
2252  
2253  static uintptr_t
2254  trequested_1(task_t task)
2255  {
2256  #if defined __LP64__
2257  	(void)task;
2258  	return 0;
2259  #else
2260  	uintptr_t* raw = (uintptr_t*)(&task->requested_policy);
2261  	return raw[1];
2262  #endif
2263  }
2264  
2265  static uintptr_t
2266  teffective_0(task_t task)
2267  {
2268  	uintptr_t* raw = (uintptr_t*)&task->effective_policy;
2269  
2270  	return raw[0];
2271  }
2272  
2273  static uintptr_t
2274  teffective_1(task_t task)
2275  {
2276  #if defined __LP64__
2277  	(void)task;
2278  	return 0;
2279  #else
2280  	uintptr_t* raw = (uintptr_t*)(&task->effective_policy);
2281  	return raw[1];
2282  #endif
2283  }
2284  
2285  /* dump pending for tracepoint */
2286  uint32_t
2287  tpending(task_pend_token_t pend_token)
2288  {
2289  	return *(uint32_t*)(void*)(pend_token);
2290  }
2291  
2292  uint64_t
2293  task_requested_bitfield(task_t task)
2294  {
2295  	uint64_t bits = 0;
2296  	struct task_requested_policy requested = task->requested_policy;
2297  
2298  	bits |= (requested.trp_int_darwinbg     ? POLICY_REQ_INT_DARWIN_BG  : 0);
2299  	bits |= (requested.trp_ext_darwinbg     ? POLICY_REQ_EXT_DARWIN_BG  : 0);
2300  	bits |= (requested.trp_int_iotier       ? (((uint64_t)requested.trp_int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0);
2301  	bits |= (requested.trp_ext_iotier       ? (((uint64_t)requested.trp_ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0);
2302  	bits |= (requested.trp_int_iopassive    ? POLICY_REQ_INT_PASSIVE_IO : 0);
2303  	bits |= (requested.trp_ext_iopassive    ? POLICY_REQ_EXT_PASSIVE_IO : 0);
2304  	bits |= (requested.trp_bg_iotier        ? (((uint64_t)requested.trp_bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT)   : 0);
2305  	bits |= (requested.trp_terminated       ? POLICY_REQ_TERMINATED     : 0);
2306  
2307  	bits |= (requested.trp_boosted          ? POLICY_REQ_BOOSTED        : 0);
2308  	bits |= (requested.trp_tal_enabled      ? POLICY_REQ_TAL_ENABLED    : 0);
2309  	bits |= (requested.trp_apptype          ? (((uint64_t)requested.trp_apptype) << POLICY_REQ_APPTYPE_SHIFT)  : 0);
2310  	bits |= (requested.trp_role             ? (((uint64_t)requested.trp_role) << POLICY_REQ_ROLE_SHIFT)     : 0);
2311  
2312  	bits |= (requested.trp_sup_active       ? POLICY_REQ_SUP_ACTIVE         : 0);
2313  	bits |= (requested.trp_sup_lowpri_cpu   ? POLICY_REQ_SUP_LOWPRI_CPU     : 0);
2314  	bits |= (requested.trp_sup_cpu          ? POLICY_REQ_SUP_CPU            : 0);
2315  	bits |= (requested.trp_sup_timer        ? (((uint64_t)requested.trp_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0);
2316  	bits |= (requested.trp_sup_throughput   ? (((uint64_t)requested.trp_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT)     : 0);
2317  	bits |= (requested.trp_sup_disk         ? POLICY_REQ_SUP_DISK_THROTTLE  : 0);
2318  	bits |= (requested.trp_sup_bg_sockets   ? POLICY_REQ_SUP_BG_SOCKETS     : 0);
2319  
2320  	bits |= (requested.trp_base_latency_qos ? (((uint64_t)requested.trp_base_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0);
2321  	bits |= (requested.trp_over_latency_qos ? (((uint64_t)requested.trp_over_latency_qos) << POLICY_REQ_OVER_LATENCY_QOS_SHIFT) : 0);
2322  	bits |= (requested.trp_base_through_qos ? (((uint64_t)requested.trp_base_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0);
2323  	bits |= (requested.trp_over_through_qos ? (((uint64_t)requested.trp_over_through_qos) << POLICY_REQ_OVER_THROUGH_QOS_SHIFT) : 0);
2324  	bits |= (requested.trp_sfi_managed      ? POLICY_REQ_SFI_MANAGED        : 0);
2325  	bits |= (requested.trp_qos_clamp        ? (((uint64_t)requested.trp_qos_clamp) << POLICY_REQ_QOS_CLAMP_SHIFT)        : 0);
2326  
2327  	return bits;
2328  }
2329  
2330  uint64_t
2331  task_effective_bitfield(task_t task)
2332  {
2333  	uint64_t bits = 0;
2334  	struct task_effective_policy effective = task->effective_policy;
2335  
2336  	bits |= (effective.tep_io_tier          ? (((uint64_t)effective.tep_io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0);
2337  	bits |= (effective.tep_io_passive       ? POLICY_EFF_IO_PASSIVE     : 0);
2338  	bits |= (effective.tep_darwinbg         ? POLICY_EFF_DARWIN_BG      : 0);
2339  	bits |= (effective.tep_lowpri_cpu       ? POLICY_EFF_LOWPRI_CPU     : 0);
2340  	bits |= (effective.tep_terminated       ? POLICY_EFF_TERMINATED     : 0);
2341  	bits |= (effective.tep_all_sockets_bg   ? POLICY_EFF_ALL_SOCKETS_BG : 0);
2342  	bits |= (effective.tep_new_sockets_bg   ? POLICY_EFF_NEW_SOCKETS_BG : 0);
2343  	bits |= (effective.tep_bg_iotier        ? (((uint64_t)effective.tep_bg_iotier) << POLICY_EFF_BG_IOTIER_SHIFT) : 0);
2344  	bits |= (effective.tep_qos_ui_is_urgent ? POLICY_EFF_QOS_UI_IS_URGENT : 0);
2345  
2346  	bits |= (effective.tep_tal_engaged      ? POLICY_EFF_TAL_ENGAGED    : 0);
2347  	bits |= (effective.tep_watchers_bg      ? POLICY_EFF_WATCHERS_BG    : 0);
2348  	bits |= (effective.tep_sup_active       ? POLICY_EFF_SUP_ACTIVE     : 0);
2349  	bits |= (effective.tep_suppressed_cpu   ? POLICY_EFF_SUP_CPU        : 0);
2350  	bits |= (effective.tep_role             ? (((uint64_t)effective.tep_role) << POLICY_EFF_ROLE_SHIFT)        : 0);
2351  	bits |= (effective.tep_latency_qos      ? (((uint64_t)effective.tep_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0);
2352  	bits |= (effective.tep_through_qos      ? (((uint64_t)effective.tep_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0);
2353  	bits |= (effective.tep_sfi_managed      ? POLICY_EFF_SFI_MANAGED    : 0);
2354  	bits |= (effective.tep_qos_ceiling      ? (((uint64_t)effective.tep_qos_ceiling) << POLICY_EFF_QOS_CEILING_SHIFT) : 0);
2355  
2356  	return bits;
2357  }
2358  
2359  
2360  /*
2361   * Resource usage and CPU related routines
2362   */
2363  
2364  int
2365  proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep)
2366  {
2367  	int error = 0;
2368  	int scope;
2369  
2370  	task_lock(task);
2371  
2372  
2373  	error = task_get_cpuusage(task, percentagep, intervalp, deadlinep, &scope);
2374  	task_unlock(task);
2375  
2376  	/*
2377  	 * Reverse-map from CPU resource limit scopes back to policies (see comment below).
2378  	 */
2379  	if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2380  		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC;
2381  	} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2382  		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE;
2383  	} else if (scope == TASK_RUSECPU_FLAGS_DEADLINE) {
2384  		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2385  	}
2386  
2387  	return error;
2388  }
2389  
2390  /*
2391   * Configure the default CPU usage monitor parameters.
2392   *
2393   * For tasks that have this mechanism activated: if any thread in the process
2394   * consumes more CPU than the configured limit, an EXC_RESOURCE exception will be generated.
2395   */
2396  void
2397  proc_init_cpumon_params(void)
2398  {
2399  	/*
2400  	 * The max CPU percentage can be configured via the boot-args and
2401  	 * a key in the device tree. The boot-args are honored first, then the
2402  	 * device tree.
2403  	 */
2404  	if (!PE_parse_boot_argn("max_cpumon_percentage", &proc_max_cpumon_percentage,
2405  	    sizeof(proc_max_cpumon_percentage))) {
2406  		uint64_t max_percentage = 0ULL;
2407  
2408  		if (!PE_get_default("kern.max_cpumon_percentage", &max_percentage,
2409  		    sizeof(max_percentage))) {
2410  			max_percentage = DEFAULT_CPUMON_PERCENTAGE;
2411  		}
2412  
2413  		assert(max_percentage <= UINT8_MAX);
2414  		proc_max_cpumon_percentage = (uint8_t) max_percentage;
2415  	}
2416  
2417  	if (proc_max_cpumon_percentage > 100) {
2418  		proc_max_cpumon_percentage = 100;
2419  	}
2420  
2421  	/*
2422  	 * The interval should be specified in seconds.
2423  	 *
2424  	 * Like the max CPU percentage, the max CPU interval can be configured
2425  	 * via boot-args and the device tree.
2426  	 */
2427  	if (!PE_parse_boot_argn("max_cpumon_interval", &proc_max_cpumon_interval,
2428  	    sizeof(proc_max_cpumon_interval))) {
2429  		if (!PE_get_default("kern.max_cpumon_interval", &proc_max_cpumon_interval,
2430  		    sizeof(proc_max_cpumon_interval))) {
2431  			proc_max_cpumon_interval = DEFAULT_CPUMON_INTERVAL;
2432  		}
2433  	}
2434  
2435  	proc_max_cpumon_interval *= NSEC_PER_SEC;
2436  
2437  	/* TEMPORARY boot arg to control App suppression */
2438  	PE_parse_boot_argn("task_policy_suppression_flags",
2439  	    &task_policy_suppression_flags,
2440  	    sizeof(task_policy_suppression_flags));
2441  
2442  	/* adjust suppression disk policy if called for in boot arg */
2443  	if (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_IOTIER2) {
2444  		proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER2;
2445  	}
2446  }
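      /*
       * Example (illustrative; values are arbitrary): the defaults above can be
       * overridden at boot, e.g.
       *
       *     nvram boot-args="max_cpumon_percentage=75 max_cpumon_interval=300"
       *
       * for a 75% limit over a 300 second refill interval (the interval is given
       * in seconds and scaled to nanoseconds above).
       */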
2447  
2448  /*
2449   * Currently supported configurations for CPU limits.
2450   *
2451   * Policy				| Deadline-based CPU limit | Percentage-based CPU limit
2452   * -------------------------------------+--------------------------+------------------------------
2453   * PROC_POLICY_RSRCACT_THROTTLE		| ENOTSUP		   | Task-wide scope only
2454   * PROC_POLICY_RSRCACT_SUSPEND		| Task-wide scope only	   | ENOTSUP
2455   * PROC_POLICY_RSRCACT_TERMINATE	| Task-wide scope only	   | ENOTSUP
2456   * PROC_POLICY_RSRCACT_NOTIFY_KQ	| Task-wide scope only	   | ENOTSUP
2457   * PROC_POLICY_RSRCACT_NOTIFY_EXC	| ENOTSUP		   | Per-thread scope only
2458   *
2459   * A deadline-based CPU limit is actually a simple wallclock timer - the requested action is performed
2460   * after the specified amount of wallclock time has elapsed.
2461   *
2462   * A percentage-based CPU limit performs the requested action after the specified amount of actual CPU time
2463   * has been consumed -- regardless of how much wallclock time has elapsed -- by either the task as an
2464   * aggregate entity (so-called "Task-wide" or "Proc-wide" scope, whereby the CPU time consumed by all threads
2465   * in the task are added together), or by any one thread in the task (so-called "per-thread" scope).
2466   *
2467   * We support either deadline != 0 OR percentage != 0, but not both. The original intention in having them
2468   * share an API was to use actual CPU time as the basis of the deadline-based limit (as in: perform an action
2469   * after I have used some amount of CPU time; this is different than the recurring percentage/interval model)
2470   * but the potential consumer of the API at the time was insisting on wallclock time instead.
2471   *
2472   * Currently, requesting notification via an exception is the only way to get per-thread scope for a
2473   * CPU limit. All other types of notifications force task-wide scope for the limit.
2474   */
2475  int
2476  proc_set_task_ruse_cpu(task_t task, uint16_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline,
2477      int cpumon_entitled)
2478  {
2479  	int error = 0;
2480  	int scope;
2481  
2482  	/*
2483  	 * Enforce the matrix of supported configurations for policy, percentage, and deadline.
2484  	 */
2485  	switch (policy) {
2486  	// If no policy is explicitly given, the default is to throttle.
2487  	case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE:
2488  	case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE:
2489  		if (deadline != 0) {
2490  			return ENOTSUP;
2491  		}
2492  		scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2493  		break;
2494  	case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND:
2495  	case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE:
2496  	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ:
2497  		if (percentage != 0) {
2498  			return ENOTSUP;
2499  		}
2500  		scope = TASK_RUSECPU_FLAGS_DEADLINE;
2501  		break;
2502  	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC:
2503  		if (deadline != 0) {
2504  			return ENOTSUP;
2505  		}
2506  		scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2507  #ifdef CONFIG_NOMONITORS
2508  		return error;
2509  #endif /* CONFIG_NOMONITORS */
2510  		break;
2511  	default:
2512  		return EINVAL;
2513  	}
2514  
2515  	task_lock(task);
2516  	if (task != current_task()) {
2517  		task->policy_ru_cpu_ext = policy;
2518  	} else {
2519  		task->policy_ru_cpu = policy;
2520  	}
2521  	error = task_set_cpuusage(task, percentage, interval, deadline, scope, cpumon_entitled);
2522  	task_unlock(task);
2523  	return error;
2524  }
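      /*
       * Illustrative sketch (not in the original file) of the matrix above:
       * a percentage-based throttle is accepted, while combining the same policy
       * with a nonzero deadline returns ENOTSUP.  The helper is hypothetical.
       */
      #if 0
      static int
      example_throttle_at_fifty_percent(task_t task)
      {
      	/* task-wide scope: throttle once 50% of each (default 1s) interval is consumed */
      	return proc_set_task_ruse_cpu(task, TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE,
      	    50, 0 /* default interval */, 0 /* no deadline */, 0);
      }
      #endif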
2525  
2526  /* TODO: get rid of these */
2527  #define TASK_POLICY_CPU_RESOURCE_USAGE          0
2528  #define TASK_POLICY_WIREDMEM_RESOURCE_USAGE     1
2529  #define TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE   2
2530  #define TASK_POLICY_DISK_RESOURCE_USAGE         3
2531  #define TASK_POLICY_NETWORK_RESOURCE_USAGE      4
2532  #define TASK_POLICY_POWER_RESOURCE_USAGE        5
2533  
2534  #define TASK_POLICY_RESOURCE_USAGE_COUNT        6
2535  
2536  int
2537  proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled)
2538  {
2539  	int error = 0;
2540  	int action;
2541  	void * bsdinfo = NULL;
2542  
2543  	task_lock(task);
2544  	if (task != current_task()) {
2545  		task->policy_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2546  	} else {
2547  		task->policy_ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2548  	}
2549  
2550  	error = task_clear_cpuusage_locked(task, cpumon_entitled);
2551  	if (error != 0) {
2552  		goto out;
2553  	}
2554  
2555  	action = task->applied_ru_cpu;
2556  	if (task->applied_ru_cpu_ext != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2557  		/* reset action */
2558  		task->applied_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2559  	}
2560  	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2561  		bsdinfo = task->bsd_info;
2562  		task_unlock(task);
2563  		proc_restore_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2564  		goto out1;
2565  	}
2566  
2567  out:
2568  	task_unlock(task);
2569  out1:
2570  	return error;
2571  }
2572  
2573  /* used to apply resource limit related actions */
2574  static int
2575  task_apply_resource_actions(task_t task, int type)
2576  {
2577  	int action = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2578  	void * bsdinfo = NULL;
2579  
2580  	switch (type) {
2581  	case TASK_POLICY_CPU_RESOURCE_USAGE:
2582  		break;
2583  	case TASK_POLICY_WIREDMEM_RESOURCE_USAGE:
2584  	case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE:
2585  	case TASK_POLICY_DISK_RESOURCE_USAGE:
2586  	case TASK_POLICY_NETWORK_RESOURCE_USAGE:
2587  	case TASK_POLICY_POWER_RESOURCE_USAGE:
2588  		return 0;
2589  
2590  	default:
2591  		return 1;
2592  	}
2594  
2595  	/* only cpu actions for now */
2596  	task_lock(task);
2597  
2598  	if (task->applied_ru_cpu_ext == TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2599  		/* apply action */
2600  		task->applied_ru_cpu_ext = task->policy_ru_cpu_ext;
2601  	}
2602  	action = task->applied_ru_cpu_ext;
2605  
2606  	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2607  		bsdinfo = task->bsd_info;
2608  		task_unlock(task);
2609  		proc_apply_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2610  	} else {
2611  		task_unlock(task);
2612  	}
2613  
2614  	return 0;
2615  }
2616  
2617  /*
2618   * XXX This API is somewhat broken; we support multiple simultaneous CPU limits, but the get/set API
2619   * only allows for one at a time. This means that if there is a per-thread limit active, the other
2620   * "scopes" will not be accessible via this API. We could change it to pass in the scope of interest
2621   * to the caller, and prefer that, but there's no need for that at the moment.
       * The reporting below prefers a per-thread limit, then a proc-wide limit,
       * then a deadline, in that order.
2622   */
2623  static int
2624  task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope)
2625  {
2626  	*percentagep = 0;
2627  	*intervalp = 0;
2628  	*deadlinep = 0;
2629  
2630  	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) {
2631  		*scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2632  		*percentagep = task->rusage_cpu_perthr_percentage;
2633  		*intervalp = task->rusage_cpu_perthr_interval;
2634  	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) != 0) {
2635  		*scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2636  		*percentagep = task->rusage_cpu_percentage;
2637  		*intervalp = task->rusage_cpu_interval;
2638  	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) != 0) {
2639  		*scope = TASK_RUSECPU_FLAGS_DEADLINE;
2640  		*deadlinep = task->rusage_cpu_deadline;
2641  	} else {
2642  		*scope = 0;
2643  	}
2644  
2645  	return 0;
2646  }
2647  
2648  /*
2649   * Suspend the CPU usage monitor for the task.  Return value indicates
2650   * if the mechanism was actually enabled.
2651   */
2652  int
2653  task_suspend_cpumon(task_t task)
2654  {
2655  	thread_t thread;
2656  
2657  	task_lock_assert_owned(task);
2658  
2659  	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) {
2660  		return KERN_INVALID_ARGUMENT;
2661  	}
2662  
2663  #if CONFIG_TELEMETRY
2664  	/*
2665  	 * Disable task-wide telemetry if it was ever enabled by the CPU usage
2666  	 * monitor's warning zone.
2667  	 */
2668  	telemetry_task_ctl_locked(task, TF_CPUMON_WARNING, 0);
2669  #endif
2670  
2671  	/*
2672  	 * Suspend monitoring for the task, and propagate that change to each thread.
2673  	 */
2674  	task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON);
2675  	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2676  		act_set_astledger(thread);
2677  	}
2678  
2679  	return KERN_SUCCESS;
2680  }
2681  
2682  /*
2683   * Remove all traces of the CPU monitor.
2684   */
2685  int
2686  task_disable_cpumon(task_t task)
2687  {
2688  	int kret;
2689  
2690  	task_lock_assert_owned(task);
2691  
2692  	kret = task_suspend_cpumon(task);
2693  	if (kret) {
2694  		return kret;
2695  	}
2696  
2697  	/* Once we clear these values, the monitor can't be resumed */
2698  	task->rusage_cpu_perthr_percentage = 0;
2699  	task->rusage_cpu_perthr_interval = 0;
2700  
2701  	return KERN_SUCCESS;
2702  }
2703  
2704  
2705  static int
2706  task_enable_cpumon_locked(task_t task)
2707  {
2708  	thread_t thread;
2709  	task_lock_assert_owned(task);
2710  
2711  	if (task->rusage_cpu_perthr_percentage == 0 ||
2712  	    task->rusage_cpu_perthr_interval == 0) {
2713  		return KERN_INVALID_ARGUMENT;
2714  	}
2715  
2716  	task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2717  	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2718  		act_set_astledger(thread);
2719  	}
2720  
2721  	return KERN_SUCCESS;
2722  }
2723  
2724  int
2725  task_resume_cpumon(task_t task)
2726  {
2727  	kern_return_t kret;
2728  
2729  	if (!task) {
2730  		return EINVAL;
2731  	}
2732  
2733  	task_lock(task);
2734  	kret = task_enable_cpumon_locked(task);
2735  	task_unlock(task);
2736  
2737  	return kret;
2738  }
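      /*
       * Illustrative sketch (not in the original file): suspend preserves the
       * per-thread parameters so the monitor can be resumed; disable also zeroes
       * them, after which resume fails.  Assumes a per-thread monitor was
       * configured first.
       */
      #if 0
      static void
      example_cpumon_lifecycle(task_t task)
      {
      	task_lock(task);
      	(void)task_suspend_cpumon(task);  /* clears the flag, keeps percentage/interval */
      	task_unlock(task);
      	(void)task_resume_cpumon(task);   /* KERN_SUCCESS: parameters were preserved */

      	task_lock(task);
      	(void)task_disable_cpumon(task);  /* additionally zeroes percentage/interval */
      	task_unlock(task);
      	(void)task_resume_cpumon(task);   /* KERN_INVALID_ARGUMENT: nothing to resume */
      }
      #endif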
2739  
2740  
2741  /* duplicate values from bsd/sys/process_policy.h */
2742  #define PROC_POLICY_CPUMON_DISABLE      0xFF
2743  #define PROC_POLICY_CPUMON_DEFAULTS     0xFE
2744  
2745  static int
2746  task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int cpumon_entitled)
2747  {
2748  	uint64_t abstime = 0;
2749  	uint64_t limittime = 0;
2750  
2751  	lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED);
2752  
2753  	/* By default, refill once per second */
2754  	if (interval == 0) {
2755  		interval = NSEC_PER_SEC;
2756  	}
2757  
2758  	if (percentage != 0) {
2759  		if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2760  			boolean_t warn = FALSE;
2761  
2762  			/*
2763  			 * A per-thread CPU limit on a task generates an exception
2764  			 * (LEDGER_ACTION_EXCEPTION) if any one thread in the task
2765  			 * exceeds the limit.
2766  			 */
2767  
2768  			if (percentage == PROC_POLICY_CPUMON_DISABLE) {
2769  				if (cpumon_entitled) {
2770  					/* 25095698 - task_disable_cpumon() should be reliable */
2771  					task_disable_cpumon(task);
2772  					return 0;
2773  				}
2774  
2775  				/*
2776  				 * This task wishes to disable the CPU usage monitor, but it's
2777  				 * missing the required entitlement:
2778  				 *     com.apple.private.kernel.override-cpumon
2779  				 *
2780  				 * Instead, treat this as a request to reset its params
2781  				 * back to the defaults.
2782  				 */
2783  				warn = TRUE;
2784  				percentage = PROC_POLICY_CPUMON_DEFAULTS;
2785  			}
2786  
2787  			if (percentage == PROC_POLICY_CPUMON_DEFAULTS) {
2788  				percentage = proc_max_cpumon_percentage;
2789  				interval   = proc_max_cpumon_interval;
2790  			}
2791  
2792  			if (percentage > 100) {
2793  				percentage = 100;
2794  			}
2795  
2796  			/*
2797  			 * Passing in an interval of -1 means either:
2798  			 * - Leave the interval as-is, if there's already a per-thread
2799  			 *   limit configured
2800  			 * - Use the system default.
2801  			 */
2802  			if (interval == -1ULL) {
2803  				if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2804  					interval = task->rusage_cpu_perthr_interval;
2805  				} else {
2806  					interval = proc_max_cpumon_interval;
2807  				}
2808  			}
2809  
2810  			/*
2811  			 * Enforce global caps on CPU usage monitor here if the process is not
2812  			 * entitled to escape the global caps.
2813  			 */
2814  			if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) {
2815  				warn = TRUE;
2816  				percentage = proc_max_cpumon_percentage;
2817  			}
2818  
2819  			if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) {
2820  				warn = TRUE;
2821  				interval = proc_max_cpumon_interval;
2822  			}
2823  
2824  			if (warn) {
2825  				int       pid = 0;
2826  				const char *procname = "unknown";
2827  
2828  #ifdef MACH_BSD
2829  				pid = proc_selfpid();
2830  				if (current_task()->bsd_info != NULL) {
2831  					procname = proc_name_address(current_task()->bsd_info);
2832  				}
2833  #endif
2834  
2835  				printf("process %s[%d] denied attempt to escape CPU monitor"
2836  				    " (missing required entitlement).\n", procname, pid);
2837  			}
2838  
2839  			/* configure the limit values */
2840  			task->rusage_cpu_perthr_percentage = percentage;
2841  			task->rusage_cpu_perthr_interval = interval;
2842  
2843  			/* and enable the CPU monitor */
2844  			(void)task_enable_cpumon_locked(task);
2845  		} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2846  			/*
2847  			 * Currently, a proc-wide CPU limit always blocks if the limit is
2848  			 * exceeded (LEDGER_ACTION_BLOCK).
2849  			 */
2850  			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PROC_LIMIT;
2851  			task->rusage_cpu_percentage = percentage;
2852  			task->rusage_cpu_interval = interval;
2853  
2854  			limittime = (interval * percentage) / 100;
2855  			nanoseconds_to_absolutetime(limittime, &abstime);
2856  
2857  			ledger_set_limit(task->ledger, task_ledgers.cpu_time, abstime, 0);
2858  			ledger_set_period(task->ledger, task_ledgers.cpu_time, interval);
2859  			ledger_set_action(task->ledger, task_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
2860  		}
2861  	}
2862  
2863  	if (deadline != 0) {
2864  		assert(scope == TASK_RUSECPU_FLAGS_DEADLINE);
2865  
2866  		/* if already in use, cancel it and wait for it to clean out */
2867  		if (task->rusage_cpu_callt != NULL) {
2868  			task_unlock(task);
2869  			thread_call_cancel_wait(task->rusage_cpu_callt);
2870  			task_lock(task);
2871  		}
2872  		if (task->rusage_cpu_callt == NULL) {
2873  			task->rusage_cpu_callt = thread_call_allocate_with_priority(task_action_cpuusage, (thread_call_param_t)task, THREAD_CALL_PRIORITY_KERNEL);
2874  		}
2875  		/* setup callout */
2876  		if (task->rusage_cpu_callt != 0) {
2877  			uint64_t save_abstime = 0;
2878  
2879  			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_DEADLINE;
2880  			task->rusage_cpu_deadline = deadline;
2881  
2882  			nanoseconds_to_absolutetime(deadline, &abstime);
2883  			save_abstime = abstime;
2884  			clock_absolutetime_interval_to_deadline(save_abstime, &abstime);
2885  			thread_call_enter_delayed(task->rusage_cpu_callt, abstime);
2886  		}
2887  	}
2888  
2889  	return 0;
2890  }
2891  
2892  int
2893  task_clear_cpuusage(task_t task, int cpumon_entitled)
2894  {
2895  	int retval = 0;
2896  
2897  	task_lock(task);
2898  	retval = task_clear_cpuusage_locked(task, cpumon_entitled);
2899  	task_unlock(task);
2900  
2901  	return retval;
2902  }
2903  
2904  static int
2905  task_clear_cpuusage_locked(task_t task, int cpumon_entitled)
2906  {
2907  	thread_call_t savecallt;
2908  
2909  	/* cancel percentage handling if set */
2910  	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2911  		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT;
2912  		ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
2913  		task->rusage_cpu_percentage = 0;
2914  		task->rusage_cpu_interval = 0;
2915  	}
2916  
2917  	/*
2918  	 * Disable the CPU usage monitor.
2919  	 */
2920  	if (cpumon_entitled) {
2921  		task_disable_cpumon(task);
2922  	}
2923  
2924  	/* cancel deadline handling if set */
2925  	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) {
2926  		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_DEADLINE;
2927  		if (task->rusage_cpu_callt != 0) {
2928  			savecallt = task->rusage_cpu_callt;
2929  			task->rusage_cpu_callt = NULL;
2930  			task->rusage_cpu_deadline = 0;
2931  			task_unlock(task);
2932  			thread_call_cancel_wait(savecallt);
2933  			thread_call_free(savecallt);
2934  			task_lock(task);
2935  		}
2936  	}
2937  	return 0;
2938  }
2939  
2940  /* called by ledger unit to enforce action due to resource usage criteria being met */
2941  static void
2942  task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t param1)
2943  {
2944  	task_t task = (task_t)param0;
2945  	(void)task_apply_resource_actions(task, TASK_POLICY_CPU_RESOURCE_USAGE);
2946  	return;
2947  }
2948  
2949  
2950  /*
2951   * Routines for taskwatch and pidbind
2952   */
2953  
2954  #if CONFIG_TASKWATCH
2955  
2956  LCK_MTX_DECLARE_ATTR(task_watch_mtx, &task_lck_grp, &task_lck_attr);
2957  
2958  static void
2959  task_watch_lock(void)
2960  {
2961  	lck_mtx_lock(&task_watch_mtx);
2962  }
2963  
2964  static void
2965  task_watch_unlock(void)
2966  {
2967  	lck_mtx_unlock(&task_watch_mtx);
2968  }
2969  
2970  static void
2971  add_taskwatch_locked(task_t task, task_watch_t * twp)
2972  {
2973  	queue_enter(&task->task_watchers, twp, task_watch_t *, tw_links);
2974  	task->num_taskwatchers++;
2975  }
2976  
2977  static void
2978  remove_taskwatch_locked(task_t task, task_watch_t * twp)
2979  {
2980  	queue_remove(&task->task_watchers, twp, task_watch_t *, tw_links);
2981  	task->num_taskwatchers--;
2982  }
2983  
2984  
2985  int
2986  proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind)
2987  {
2988  	thread_t target_thread = NULL;
2989  	int ret = 0, setbg = 0;
2990  	task_watch_t *twp = NULL;
2991  	task_t task = TASK_NULL;
2992  
2993  	target_thread = task_findtid(curtask, tid);
2994  	if (target_thread == NULL) {
2995  		return ESRCH;
2996  	}
2997  	/* holds thread reference */
2998  
2999  	if (bind != 0) {
3000  		/* is the task still active? */
3001  		task_lock(target_task);
3002  		if (target_task->active == 0) {
3003  			task_unlock(target_task);
3004  			ret = ESRCH;
3005  			goto out;
3006  		}
3007  		task_unlock(target_task);
3008  
3009  		twp = (task_watch_t *)kalloc(sizeof(task_watch_t));
3010  		if (twp == NULL) {
3011  			/* note: the task watch lock is not yet held on this path */
3012  			ret = ENOMEM;
3013  			goto out;
3014  		}
3015  
3016  		bzero(twp, sizeof(task_watch_t));
3017  
3018  		task_watch_lock();
3019  
3020  		if (target_thread->taskwatch != NULL) {
3021  			/* already bound to another task */
3022  			task_watch_unlock();
3023  
3024  			kfree(twp, sizeof(task_watch_t));
3025  			ret = EBUSY;
3026  			goto out;
3027  		}
3028  
3029  		task_reference(target_task);
3030  
3031  		setbg = proc_get_effective_task_policy(target_task, TASK_POLICY_WATCHERS_BG);
3032  
3033  		twp->tw_task = target_task;             /* holds the task reference */
3034  		twp->tw_thread = target_thread;         /* holds the thread reference */
3035  		twp->tw_state = setbg;
3036  		twp->tw_importance = target_thread->importance;
3037  
3038  		add_taskwatch_locked(target_task, twp);
3039  
3040  		target_thread->taskwatch = twp;
3041  
3042  		task_watch_unlock();
3043  
3044  		if (setbg) {
3045  			set_thread_appbg(target_thread, setbg, INT_MIN);
3046  		}
3047  
3048  		/* the thread reference is now held by twp; don't drop it below */
3049  		target_thread = NULL;
3050  	} else {
3051  		/* unbind */
3052  		task_watch_lock();
3053  		if ((twp = target_thread->taskwatch) != NULL) {
3054  			task = twp->tw_task;
3055  			target_thread->taskwatch = NULL;
3056  			remove_taskwatch_locked(task, twp);
3057  
3058  			task_watch_unlock();
3059  
3060  			task_deallocate(task);                  /* drop task ref in twp */
3061  			set_thread_appbg(target_thread, 0, twp->tw_importance);
3062  			thread_deallocate(target_thread);       /* drop thread ref in twp */
3063  			kfree(twp, sizeof(task_watch_t));
3064  		} else {
3065  			task_watch_unlock();
3066  			ret = 0;                /* return success if it was not already bound */
3067  			goto out;
3068  		}
3069  	}
3070  out:
3071  	thread_deallocate(target_thread);       /* drop thread ref acquired in this routine */
3072  	return ret;
3073  }
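      /*
       * Illustrative sketch (not in the original file): bind a thread in the
       * current task to a target task so the thread is backgrounded and
       * foregrounded alongside it; unbinding restores the saved importance.
       * The helper is hypothetical.
       */
      #if 0
      static int
      example_bind_watcher(task_t curtask, uint64_t tid, task_t target)
      {
      	int ret = proc_lf_pidbind(curtask, tid, target, 1);  /* bind */
      	/* ... later, when the watch is no longer needed ... */
      	ret = proc_lf_pidbind(curtask, tid, target, 0);      /* unbind */
      	return ret;
      }
      #endif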
3074  
3075  static void
3076  set_thread_appbg(thread_t thread, int setbg, __unused int importance)
3077  {
3078  	int enable = (setbg ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE);
3079  
3080  	proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_PIDBIND_BG, enable);
3081  }
3082  
3083  static void
3084  apply_appstate_watchers(task_t task)
3085  {
3086  	int numwatchers = 0, i, j, setbg;
3087  	thread_watchlist_t * threadlist;
3088  	task_watch_t * twp;
3089  
3090  retry:
3091  	/* if there are no watchers on the list, return */
3092  	if ((numwatchers = task->num_taskwatchers) == 0) {
3093  		return;
3094  	}
3095  
3096  	threadlist = kheap_alloc(KHEAP_TEMP,
3097  	    numwatchers * sizeof(thread_watchlist_t), Z_WAITOK | Z_ZERO);
3098  	if (threadlist == NULL) {
3099  		return;
3100  	}
3101  
3102  	task_watch_lock();
3103  	/* serialize application of app state changes */
3104  
3105  	if (task->watchapplying != 0) {
3106  		lck_mtx_sleep(&task_watch_mtx, LCK_SLEEP_DEFAULT, &task->watchapplying, THREAD_UNINT);
3107  		task_watch_unlock();
3108  		kheap_free(KHEAP_TEMP, threadlist, numwatchers * sizeof(thread_watchlist_t));
3109  		goto retry;
3110  	}
3111  
3112  	if (numwatchers != task->num_taskwatchers) {
3113  		task_watch_unlock();
3114  		kheap_free(KHEAP_TEMP, threadlist, numwatchers * sizeof(thread_watchlist_t));
3115  		goto retry;
3116  	}
3117  
3118  	setbg = proc_get_effective_task_policy(task, TASK_POLICY_WATCHERS_BG);
3119  
3120  	task->watchapplying = 1;
3121  	i = 0;
3122  	queue_iterate(&task->task_watchers, twp, task_watch_t *, tw_links) {
3123  		threadlist[i].thread = twp->tw_thread;
3124  		thread_reference(threadlist[i].thread);
3125  		if (setbg != 0) {
3126  			twp->tw_importance = twp->tw_thread->importance;
3127  			threadlist[i].importance = INT_MIN;
3128  		} else {
3129  			threadlist[i].importance = twp->tw_importance;
3130  		}
3131  		i++;
3132  		if (i >= numwatchers) {
3133  			break;
3134  		}
3135  	}
3136  
3137  	task_watch_unlock();
3138  
3139  	for (j = 0; j < i; j++) {
3140  		set_thread_appbg(threadlist[j].thread, setbg, threadlist[j].importance);
3141  		thread_deallocate(threadlist[j].thread);
3142  	}
3143  	kheap_free(KHEAP_TEMP, threadlist, numwatchers * sizeof(thread_watchlist_t));
3144  
3145  
3146  	task_watch_lock();
3147  	task->watchapplying = 0;
3148  	thread_wakeup_one(&task->watchapplying);
3149  	task_watch_unlock();
3150  }
3151  
3152  void
3153  thead_remove_taskwatch(thread_t thread)
3154  {
3155  	task_watch_t * twp;
3156  	int importance = 0;
3157  
3158  	task_watch_lock();
3159  	if ((twp = thread->taskwatch) != NULL) {
3160  		thread->taskwatch = NULL;
3161  		remove_taskwatch_locked(twp->tw_task, twp);
3162  	}
3163  	task_watch_unlock();
3164  	if (twp != NULL) {
3165  		thread_deallocate(twp->tw_thread);
3166  		task_deallocate(twp->tw_task);
3167  		importance = twp->tw_importance;
3168  		kfree(twp, sizeof(task_watch_t));
3169  		/* remove the thread and networkbg */
3170  		set_thread_appbg(thread, 0, importance);
3171  	}
3172  }
3173  
3174  void
3175  task_removewatchers(task_t task)
3176  {
3177  	queue_head_t queue;
3178  	task_watch_t *twp;
3179  
3180  	task_watch_lock();
3181  	queue_new_head(&task->task_watchers, &queue, task_watch_t *, tw_links);
3182  	queue_init(&task->task_watchers);
3183  
3184  	queue_iterate(&queue, twp, task_watch_t *, tw_links) {
3185  		/*
3186  		 * Since the linkage is removed and thread state cleanup is already set up,
3187  		 * remove the reference from the thread.
3188  		 */
3189  		twp->tw_thread->taskwatch = NULL;       /* removed linkage, clear thread holding ref */
3190  	}
3191  
3192  	task->num_taskwatchers = 0;
3193  	task_watch_unlock();
3194  
3195  	while (!queue_empty(&queue)) {
3196  		queue_remove_first(&queue, twp, task_watch_t *, tw_links);
3197  		/* remove thread and network bg */
3198  		set_thread_appbg(twp->tw_thread, 0, twp->tw_importance);
3199  		thread_deallocate(twp->tw_thread);
3200  		task_deallocate(twp->tw_task);
3201  		kfree(twp, sizeof(task_watch_t));
3202  	}
3203  }
3204  #endif /* CONFIG_TASKWATCH */
3205  
3206  /*
3207   * Routines for importance donation/inheritance/boosting
3208   */
3209  
3210  static void
3211  task_importance_update_live_donor(task_t target_task)
3212  {
3213  #if IMPORTANCE_INHERITANCE
3214  
3215  	ipc_importance_task_t task_imp;
3216  
3217  	task_imp = ipc_importance_for_task(target_task, FALSE);
3218  	if (IIT_NULL != task_imp) {
3219  		ipc_importance_task_update_live_donor(task_imp);
3220  		ipc_importance_task_release(task_imp);
3221  	}
3222  #endif /* IMPORTANCE_INHERITANCE */
3223  }
3224  
3225  void
3226  task_importance_mark_donor(task_t task, boolean_t donating)
3227  {
3228  #if IMPORTANCE_INHERITANCE
3229  	ipc_importance_task_t task_imp;
3230  
3231  	task_imp = ipc_importance_for_task(task, FALSE);
3232  	if (IIT_NULL != task_imp) {
3233  		ipc_importance_task_mark_donor(task_imp, donating);
3234  		ipc_importance_task_release(task_imp);
3235  	}
3236  #endif /* IMPORTANCE_INHERITANCE */
3237  }
3238  
3239  void
3240  task_importance_mark_live_donor(task_t task, boolean_t live_donating)
3241  {
3242  #if IMPORTANCE_INHERITANCE
3243  	ipc_importance_task_t task_imp;
3244  
3245  	task_imp = ipc_importance_for_task(task, FALSE);
3246  	if (IIT_NULL != task_imp) {
3247  		ipc_importance_task_mark_live_donor(task_imp, live_donating);
3248  		ipc_importance_task_release(task_imp);
3249  	}
3250  #endif /* IMPORTANCE_INHERITANCE */
3251  }
3252  
3253  void
3254  task_importance_mark_receiver(task_t task, boolean_t receiving)
3255  {
3256  #if IMPORTANCE_INHERITANCE
3257  	ipc_importance_task_t task_imp;
3258  
3259  	task_imp = ipc_importance_for_task(task, FALSE);
3260  	if (IIT_NULL != task_imp) {
3261  		ipc_importance_task_mark_receiver(task_imp, receiving);
3262  		ipc_importance_task_release(task_imp);
3263  	}
3264  #endif /* IMPORTANCE_INHERITANCE */
3265  }
3266  
3267  void
3268  task_importance_mark_denap_receiver(task_t task, boolean_t denap)
3269  {
3270  #if IMPORTANCE_INHERITANCE
3271  	ipc_importance_task_t task_imp;
3272  
3273  	task_imp = ipc_importance_for_task(task, FALSE);
3274  	if (IIT_NULL != task_imp) {
3275  		ipc_importance_task_mark_denap_receiver(task_imp, denap);
3276  		ipc_importance_task_release(task_imp);
3277  	}
3278  #endif /* IMPORTANCE_INHERITANCE */
3279  }
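
      /*
       * Illustrative usage (sketch; the task names are placeholders and the
       * real call sites live in the apptype setup code): an adaptive daemon
       * is typically marked as a plain importance receiver, while a default
       * app is marked as a denap receiver.
       *
       *	task_importance_mark_receiver(daemon_task, TRUE);
       *	task_importance_mark_denap_receiver(app_task, TRUE);
       */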
3280  
3281  void
3282  task_importance_reset(__imp_only task_t task)
3283  {
3284  #if IMPORTANCE_INHERITANCE
3285  	ipc_importance_task_t task_imp;
3286  
3287  	/* TODO: Lower importance downstream before disconnect */
3288  	task_imp = task->task_imp_base;
3289  	ipc_importance_reset(task_imp, FALSE);
3290  	task_importance_update_live_donor(task);
3291  #endif /* IMPORTANCE_INHERITANCE */
3292  }
3293  
3294  void
3295  task_importance_init_from_parent(__imp_only task_t new_task, __imp_only task_t parent_task)
3296  {
3297  #if IMPORTANCE_INHERITANCE
3298  	ipc_importance_task_t new_task_imp = IIT_NULL;
3299  
3300  	new_task->task_imp_base = NULL;
3301  	if (!parent_task) {
3302  		return;
3303  	}
3304  
3305  	if (task_is_marked_importance_donor(parent_task)) {
3306  		new_task_imp = ipc_importance_for_task(new_task, FALSE);
3307  		assert(IIT_NULL != new_task_imp);
3308  		ipc_importance_task_mark_donor(new_task_imp, TRUE);
3309  	}
3310  	if (task_is_marked_live_importance_donor(parent_task)) {
3311  		if (IIT_NULL == new_task_imp) {
3312  			new_task_imp = ipc_importance_for_task(new_task, FALSE);
3313  		}
3314  		assert(IIT_NULL != new_task_imp);
3315  		ipc_importance_task_mark_live_donor(new_task_imp, TRUE);
3316  	}
3317  	/* Do not inherit 'receiver' on fork, vfexec or true spawn */
3318  	if (task_is_exec_copy(new_task) &&
3319  	    task_is_marked_importance_receiver(parent_task)) {
3320  		if (IIT_NULL == new_task_imp) {
3321  			new_task_imp = ipc_importance_for_task(new_task, FALSE);
3322  		}
3323  		assert(IIT_NULL != new_task_imp);
3324  		ipc_importance_task_mark_receiver(new_task_imp, TRUE);
3325  	}
3326  	if (task_is_marked_importance_denap_receiver(parent_task)) {
3327  		if (IIT_NULL == new_task_imp) {
3328  			new_task_imp = ipc_importance_for_task(new_task, FALSE);
3329  		}
3330  		assert(IIT_NULL != new_task_imp);
3331  		ipc_importance_task_mark_denap_receiver(new_task_imp, TRUE);
3332  	}
3333  	if (IIT_NULL != new_task_imp) {
3334  		assert(new_task->task_imp_base == new_task_imp);
3335  		ipc_importance_task_release(new_task_imp);
3336  	}
3337  #endif /* IMPORTANCE_INHERITANCE */
3338  }
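
      /*
       * Summary of the rules above: the donor, live-donor, and denap-receiver
       * marks always follow the parent; the plain 'receiver' mark is inherited
       * only when the new task is an exec copy of the parent.
       */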
3339  
3340  #if IMPORTANCE_INHERITANCE
3341  /*
3342   * Sets the task boost bit to the provided value.  Does NOT run the update function.
3343   *
3344   * Task lock must be held.
3345   */
3346  static void
3347  task_set_boost_locked(task_t task, boolean_t boost_active)
3348  {
3349  #if IMPORTANCE_TRACE
3350  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_START),
3351  	    proc_selfpid(), task_pid(task), trequested_0(task), trequested_1(task), 0);
3352  #endif /* IMPORTANCE_TRACE */
3353  
3354  	task->requested_policy.trp_boosted = boost_active;
3355  
3356  #if IMPORTANCE_TRACE
3357  	if (boost_active == TRUE) {
3358  		DTRACE_BOOST2(boost, task_t, task, int, task_pid(task));
3359  	} else {
3360  		DTRACE_BOOST2(unboost, task_t, task, int, task_pid(task));
3361  	}
3362  	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_END),
3363  	    proc_selfpid(), task_pid(task),
3364  	    trequested_0(task), trequested_1(task), 0);
3365  #endif /* IMPORTANCE_TRACE */
3366  }
3367  
3368  /*
3369   * Sets the task boost bit to the provided value and applies the update.
3370   *
3371   * Task lock must be held.  Must call update complete after unlocking the task.
3372   */
3373  void
3374  task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t pend_token)
3375  {
3376  	task_set_boost_locked(task, boost_active);
3377  
3378  	task_policy_update_locked(task, pend_token);
3379  }
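
      /*
       * Calling pattern sketch (mirrors the locking contract above; the
       * completion call is made only after dropping the task lock):
       *
       *	struct task_pend_token pend_token = {};
       *
       *	task_lock(task);
       *	task_update_boost_locked(task, TRUE, &pend_token);
       *	task_unlock(task);
       *	task_policy_update_complete_unlocked(task, &pend_token);
       */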
3380  
3381  /*
3382   * Check if this task should donate importance.
3383   *
3384   * May be called without taking the task lock. In that case, donor status can
3385   * change at any time, so sample it once per donation event rather than caching it.
3386   */
3387  boolean_t
3388  task_is_importance_donor(task_t task)
3389  {
3390  	if (task->task_imp_base == IIT_NULL) {
3391  		return FALSE;
3392  	}
3393  	return ipc_importance_task_is_donor(task->task_imp_base);
3394  }
3395  
3396  /*
3397   * Query the status of the task's donor mark.
3398   */
3399  boolean_t
3400  task_is_marked_importance_donor(task_t task)
3401  {
3402  	if (task->task_imp_base == IIT_NULL) {
3403  		return FALSE;
3404  	}
3405  	return ipc_importance_task_is_marked_donor(task->task_imp_base);
3406  }
3407  
3408  /*
3409   * Query the status of the task's live-donor mark.
3410   */
3411  boolean_t
3412  task_is_marked_live_importance_donor(task_t task)
3413  {
3414  	if (task->task_imp_base == IIT_NULL) {
3415  		return FALSE;
3416  	}
3417  	return ipc_importance_task_is_marked_live_donor(task->task_imp_base);
3418  }
3419  
3421  /*
3422   * This routine may be called without holding task lock
3423   * since the value of imp_receiver can never be unset.
3424   */
3425  boolean_t
3426  task_is_importance_receiver(task_t task)
3427  {
3428  	if (task->task_imp_base == IIT_NULL) {
3429  		return FALSE;
3430  	}
3431  	return ipc_importance_task_is_marked_receiver(task->task_imp_base);
3432  }
3433  
3434  /*
3435   * Query the task's receiver mark.
3436   */
3437  boolean_t
3438  task_is_marked_importance_receiver(task_t task)
3439  {
3440  	if (task->task_imp_base == IIT_NULL) {
3441  		return FALSE;
3442  	}
3443  	return ipc_importance_task_is_marked_receiver(task->task_imp_base);
3444  }
3445  
3446  /*
3447   * This routine may be called without holding task lock
3448   * since the value of de-nap receiver can never be unset.
3449   */
3450  boolean_t
3451  task_is_importance_denap_receiver(task_t task)
3452  {
3453  	if (task->task_imp_base == IIT_NULL) {
3454  		return FALSE;
3455  	}
3456  	return ipc_importance_task_is_denap_receiver(task->task_imp_base);
3457  }
3458  
3459  /*
3460   * Query the task's de-nap receiver mark.
3461   */
3462  boolean_t
3463  task_is_marked_importance_denap_receiver(task_t task)
3464  {
3465  	if (task->task_imp_base == IIT_NULL) {
3466  		return FALSE;
3467  	}
3468  	return ipc_importance_task_is_marked_denap_receiver(task->task_imp_base);
3469  }
3470  
3471  /*
3472   * This routine may be called without holding task lock
3473   * since the value of imp_receiver can never be unset.
3474   */
3475  boolean_t
3476  task_is_importance_receiver_type(task_t task)
3477  {
3478  	if (task->task_imp_base == IIT_NULL) {
3479  		return FALSE;
3480  	}
3481  	return task_is_importance_receiver(task) ||
3482  	       task_is_importance_denap_receiver(task);
3483  }
3484  
3485  /*
3486   * External importance assertions are managed by the process in userspace.
3487   * Internal importance assertions are the responsibility of the kernel.
3488   * Assertions are changed from internal to external via task_importance_externalize_assertion().
3489   */
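
      /*
       * Lifecycle sketch: the kernel takes an internal assertion on a target
       * task, may later externalize it (task_importance_externalize_assertion;
       * arguments elided here), and the owning process eventually drops the
       * now-external assertion from userspace:
       *
       *	(void)task_importance_hold_internal_assertion(task, 1);
       *	...
       *	task_importance_externalize_assertion(...);
       *	...
       *	(void)task_importance_drop_legacy_external_assertion(task, 1);
       */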
3490  
3491  int
3492  task_importance_hold_internal_assertion(task_t target_task, uint32_t count)
3493  {
3494  	ipc_importance_task_t task_imp;
3495  	kern_return_t ret;
3496  
3497  	/* may be first time, so allow for possible importance setup */
3498  	task_imp = ipc_importance_for_task(target_task, FALSE);
3499  	if (IIT_NULL == task_imp) {
3500  		return EOVERFLOW;
3501  	}
3502  	ret = ipc_importance_task_hold_internal_assertion(task_imp, count);
3503  	ipc_importance_task_release(task_imp);
3504  
3505  	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
3506  }
3507  
3508  int
3509  task_importance_hold_file_lock_assertion(task_t target_task, uint32_t count)
3510  {
3511  	ipc_importance_task_t task_imp;
3512  	kern_return_t ret;
3513  
3514  	/* may be first time, so allow for possible importance setup */
3515  	task_imp = ipc_importance_for_task(target_task, FALSE);
3516  	if (IIT_NULL == task_imp) {
3517  		return EOVERFLOW;
3518  	}
3519  	ret = ipc_importance_task_hold_file_lock_assertion(task_imp, count);
3520  	ipc_importance_task_release(task_imp);
3521  
3522  	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
3523  }
3524  
3525  int
3526  task_importance_hold_legacy_external_assertion(task_t target_task, uint32_t count)
3527  {
3528  	ipc_importance_task_t task_imp;
3529  	kern_return_t ret;
3530  
3531  	/* must already have set up an importance */
3532  	task_imp = target_task->task_imp_base;
3533  	if (IIT_NULL == task_imp) {
3534  		return EOVERFLOW;
3535  	}
3536  	ret = ipc_importance_task_hold_legacy_external_assertion(task_imp, count);
3537  	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
3538  }
3539  
3540  int
3541  task_importance_drop_file_lock_assertion(task_t target_task, uint32_t count)
3542  {
3543  	ipc_importance_task_t task_imp;
3544  	kern_return_t ret;
3545  
3546  	/* must already have set up an importance */
3547  	task_imp = target_task->task_imp_base;
3548  	if (IIT_NULL == task_imp) {
3549  		return EOVERFLOW;
3550  	}
3551  	ret = ipc_importance_task_drop_file_lock_assertion(task_imp, count);
3552  	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
3553  }
3554  
3555  int
3556  task_importance_drop_legacy_external_assertion(task_t target_task, uint32_t count)
3557  {
3558  	ipc_importance_task_t task_imp;
3559  	kern_return_t ret;
3560  
3561  	/* must already have set up an importance */
3562  	task_imp = target_task->task_imp_base;
3563  	if (IIT_NULL == task_imp) {
3564  		return EOVERFLOW;
3565  	}
3566  	ret = ipc_importance_task_drop_legacy_external_assertion(task_imp, count);
3567  	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
3568  }
3569  
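      /*
       * Bind a watchport to this task and transfer the boosts it carried for
       * its previous owner.  On return, *boostp is the number of boosts that
       * were dropped from the old owner; a caller would typically re-assert
       * that count on the new task (sketch; the actual caller-side accounting
       * lives in the spawn/exec path):
       *
       *	int boost = 0;
       *
       *	task_add_importance_watchport(task, port, &boost);
       *	if (boost > 0) {
       *		(void)task_importance_hold_internal_assertion(task, boost);
       *	}
       */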
3570  static void
3571  task_add_importance_watchport(task_t task, mach_port_t port, int *boostp)
3572  {
3573  	int boost = 0;
3574  
3575  	__imptrace_only int released_pid = 0;
3576  	__imptrace_only int pid = task_pid(task);
3577  
3578  	ipc_importance_task_t release_imp_task = IIT_NULL;
3579  
3580  	if (IP_VALID(port) != 0) {
3581  		ipc_importance_task_t new_imp_task = ipc_importance_for_task(task, FALSE);
3582  
3583  		ip_lock(port);
3584  
3585  		/*
3586  		 * The port must have been marked tempowner already.
3587  		 * This also filters out ports whose receive rights
3588  		 * are already enqueued in a message, as you can't
3589  		 * change the right's destination once it's already
3590  		 * on its way.
3591  		 */
3592  		if (port->ip_tempowner != 0) {
3593  			assert(port->ip_impdonation != 0);
3594  
3595  			boost = port->ip_impcount;
3596  			if (IIT_NULL != port->ip_imp_task) {
3597  				/*
3598  				 * if this port is already bound to a task,
3599  				 * release the task reference and drop any
3600  				 * watchport-forwarded boosts
3601  				 */
3602  				release_imp_task = port->ip_imp_task;
3603  				port->ip_imp_task = IIT_NULL;
3604  			}
3605  
3606  			/* mark that the port is watching another task (reference held in port->ip_imp_task) */
3607  			if (ipc_importance_task_is_marked_receiver(new_imp_task)) {
3608  				port->ip_imp_task = new_imp_task;
3609  				new_imp_task = IIT_NULL;
3610  			}
3611  		}
3612  		ip_unlock(port);
3613  
3614  		if (IIT_NULL != new_imp_task) {
3615  			ipc_importance_task_release(new_imp_task);
3616  		}
3617  
3618  		if (IIT_NULL != release_imp_task) {
3619  			if (boost > 0) {
3620  				ipc_importance_task_drop_internal_assertion(release_imp_task, boost);
3621  			}
3622  
3623  			// released_pid = task_pid(release_imp_task); /* TODO: Need ref-safe way to get pid */
3624  			ipc_importance_task_release(release_imp_task);
3625  		}
3626  #if IMPORTANCE_TRACE
3627  		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_WATCHPORT, 0)) | DBG_FUNC_NONE,
3628  		    proc_selfpid(), pid, boost, released_pid, 0);
3629  #endif /* IMPORTANCE_TRACE */
3630  	}
3631  
3632  	*boostp = boost;
3633  	return;
3634  }
3635  
3636  #endif /* IMPORTANCE_INHERITANCE */
3637  
3638  /*
3639   * Routines for VM to query task importance
3640   */
3641  
3643  /*
3644   * Relative weights used when estimating task importance for
3645   * low memory notification and purging of purgeable memory.
3646   */
3647  #define TASK_IMPORTANCE_FOREGROUND     4
3648  #define TASK_IMPORTANCE_NOTDARWINBG    1
3649  
3651  /*
3652   * (Un)Mark the task as a privileged listener for memory notifications.
3653   * If marked, this task is notified ahead of the bulk of all other tasks
3654   * when the system enters a pressure level of interest to it.
3656   */
3657  int
3658  task_low_mem_privileged_listener(task_t task, boolean_t new_value, boolean_t *old_value)
3659  {
3660  	if (old_value != NULL) {
3661  		*old_value = (boolean_t)task->low_mem_privileged_listener;
3662  	} else {
3663  		task_lock(task);
3664  		task->low_mem_privileged_listener = (uint32_t)new_value;
3665  		task_unlock(task);
3666  	}
3667  
3668  	return 0;
3669  }
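
      /*
       * Usage sketch: a non-NULL old_value makes this a pure query (new_value
       * is ignored); a NULL old_value applies new_value under the task lock.
       *
       *	boolean_t is_listener;
       *
       *	(void)task_low_mem_privileged_listener(task, FALSE, &is_listener);
       *	(void)task_low_mem_privileged_listener(task, TRUE, NULL);
       */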
3670  
3671  /*
3672   * Checks whether the task has already been notified at the given pressure level.
3673   *
3674   * Condition: the task lock must be held while calling this function.
3675   */
3676  boolean_t
3677  task_has_been_notified(task_t task, int pressurelevel)
3678  {
3679  	if (task == NULL) {
3680  		return FALSE;
3681  	}
3682  
3683  	if (pressurelevel == kVMPressureWarning) {
3684  		return task->low_mem_notified_warn ? TRUE : FALSE;
3685  	} else if (pressurelevel == kVMPressureCritical) {
3686  		return task->low_mem_notified_critical ? TRUE : FALSE;
3687  	} else {
3688  		return TRUE;
3689  	}
3690  }
3691  
3692  
3693  /*
3694   * Checks whether the task has already been used for purging.
3695   *
3696   * Condition: the task lock must be held while calling this function.
3697   */
3698  boolean_t
3699  task_used_for_purging(task_t task, int pressurelevel)
3700  {
3701  	if (task == NULL) {
3702  		return FALSE;
3703  	}
3704  
3705  	if (pressurelevel == kVMPressureWarning) {
3706  		return task->purged_memory_warn ? TRUE : FALSE;
3707  	} else if (pressurelevel == kVMPressureCritical) {
3708  		return task->purged_memory_critical ? TRUE : FALSE;
3709  	} else {
3710  		return TRUE;
3711  	}
3712  }
3713  
3714  
3715  /*
3716   * Mark the task as notified at the given memory pressure level.
3717   *
3718   * Condition: the task lock must be held while calling this function.
3719   */
3720  void
3721  task_mark_has_been_notified(task_t task, int pressurelevel)
3722  {
3723  	if (task == NULL) {
3724  		return;
3725  	}
3726  
3727  	if (pressurelevel == kVMPressureWarning) {
3728  		task->low_mem_notified_warn = 1;
3729  	} else if (pressurelevel == kVMPressureCritical) {
3730  		task->low_mem_notified_critical = 1;
3731  	}
3732  }
3733  
3734  
3735  /*
3736   * Mark the task as having been used for purging.
3737   *
3738   * Condition: the task lock must be held while calling this function.
3739   */
3740  void
3741  task_mark_used_for_purging(task_t task, int pressurelevel)
3742  {
3743  	if (task == NULL) {
3744  		return;
3745  	}
3746  
3747  	if (pressurelevel == kVMPressureWarning) {
3748  		task->purged_memory_warn = 1;
3749  	} else if (pressurelevel == kVMPressureCritical) {
3750  		task->purged_memory_critical = 1;
3751  	}
3752  }
3753  
3754  
3755  /*
3756   * Mark the task eligible for low memory notification.
3757   *
3758   * Condition: the task lock must be held while calling this function.
3759   */
3760  void
3761  task_clear_has_been_notified(task_t task, int pressurelevel)
3762  {
3763  	if (task == NULL) {
3764  		return;
3765  	}
3766  
3767  	if (pressurelevel == kVMPressureWarning) {
3768  		task->low_mem_notified_warn = 0;
3769  	} else if (pressurelevel == kVMPressureCritical) {
3770  		task->low_mem_notified_critical = 0;
3771  	}
3772  }
3773  
3774  
3775  /*
3776   * Mark the task eligible for purging its purgeable memory.
3777   *
3778   * Condition: the task lock must be held while calling this function.
3779   */
3780  void
3781  task_clear_used_for_purging(task_t task)
3782  {
3783  	if (task == NULL) {
3784  		return;
3785  	}
3786  
3787  	task->purged_memory_warn = 0;
3788  	task->purged_memory_critical = 0;
3789  }
3790  
3791  
3792  /*
3793   * Estimate task importance for purging its purgeable memory
3794   * and low memory notification.
3795   *
3796   * Importance is weighed using the following criteria, in order:
3797   * - Task role: foreground vs. background
3798   * - Boost status: boosted vs. not boosted
3799   * - Darwin BG status
3800   *
3801   * Returns: Estimated task importance; less important tasks receive
3802   *          lower estimates.
3803   */
3804  int
3805  task_importance_estimate(task_t task)
3806  {
3807  	int task_importance = 0;
3808  
3809  	if (task == NULL) {
3810  		return 0;
3811  	}
3812  
3813  	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
3814  		task_importance += TASK_IMPORTANCE_FOREGROUND;
3815  	}
3816  
3817  	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0) {
3818  		task_importance += TASK_IMPORTANCE_NOTDARWINBG;
3819  	}
3820  
3821  	return task_importance;
3822  }
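
      /*
       * Worked example: with the weights above, a foreground task that is not
       * Darwin-BG scores 4 + 1 = 5, a background task that is not Darwin-BG
       * scores 1, and a Darwin-BG background task scores 0; higher scores are
       * treated as more important by the VM pressure and purging code.
       */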
3823  
3824  boolean_t
3825  task_has_assertions(task_t task)
3826  {
      	if (task->task_imp_base == IIT_NULL) {
      		return FALSE;
      	}
3827  	return task->task_imp_base->iit_assertcnt ? TRUE : FALSE;
3828  }
3829  
3830  
3831  kern_return_t
3832  send_resource_violation(typeof(send_cpu_usage_violation) sendfunc,
3833      task_t violator,
3834      struct ledger_entry_info *linfo,
3835      resource_notify_flags_t flags)
3836  {
3837  #ifndef MACH_BSD
3838  	return KERN_NOT_SUPPORTED;
3839  #else
3840  	kern_return_t   kr = KERN_SUCCESS;
3841  	proc_t          proc = NULL;
3842  	posix_path_t    proc_path = "";
3843  	proc_name_t     procname = "<unknown>";
3844  	int             pid = -1;
3845  	clock_sec_t     secs;
3846  	clock_nsec_t    nsecs;
3847  	mach_timespec_t timestamp;
3848  	thread_t        curthread = current_thread();
3849  	ipc_port_t      dstport = MACH_PORT_NULL;
3850  
3851  	if (!violator) {
3852  		kr = KERN_INVALID_ARGUMENT;
      		goto finish;
3853  	}
3854  
3855  	/* extract violator information */
3856  	task_lock(violator);
3857  	if (!(proc = get_bsdtask_info(violator))) {
3858  		task_unlock(violator);
3859  		kr = KERN_INVALID_ARGUMENT;
      		goto finish;
3860  	}
3861  	(void)mig_strncpy(procname, proc_best_name(proc), sizeof(procname));
3862  	pid = task_pid(violator);
3863  	if (flags & kRNFatalLimitFlag) {
3864  		kr = proc_pidpathinfo_internal(proc, 0, proc_path,
3865  		    sizeof(proc_path), NULL);
3866  	}
3867  	task_unlock(violator);
3868  	if (kr) {
3869  		goto finish;
3870  	}
3871  
3872  	/* violation time ~ now */
3873  	clock_get_calendar_nanotime(&secs, &nsecs);
3874  	timestamp.tv_sec = (int32_t)secs;
3875  	timestamp.tv_nsec = (int32_t)nsecs;
3876  	/* 25567702 tracks widening mach_timespec_t */
3877  
3878  	/* send message */
3879  	kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE,
3880  	    HOST_RESOURCE_NOTIFY_PORT, &dstport);
3881  	if (kr) {
3882  		goto finish;
3883  	}
3884  
3885  	thread_set_honor_qlimit(curthread);
3886  	kr = sendfunc(dstport,
3887  	    procname, pid, proc_path, timestamp,
3888  	    linfo->lei_balance, linfo->lei_last_refill,
3889  	    linfo->lei_limit, linfo->lei_refill_period,
3890  	    flags);
3891  	thread_clear_honor_qlimit(curthread);
3892  
3893  	ipc_port_release_send(dstport);
3894  
3895  finish:
3896  	return kr;
3897  #endif      /* MACH_BSD */
3898  }
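
      /*
       * Example (sketch; the ledger entry and flag choice are illustrative):
       * report a fatal CPU-usage limit violation with the matching MIG send
       * routine.
       *
       *	struct ledger_entry_info linfo;
       *
       *	ledger_get_entry_info(violator->ledger, task_ledgers.cpu_time, &linfo);
       *	(void)send_resource_violation(send_cpu_usage_violation, violator,
       *	    &linfo, kRNFatalLimitFlag);
       */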
3899  
3900  
3901  /*
3902   * Resource violations trace four 64-bit integers.  For K32, two additional
3903   * codes are allocated, the first with its low two bits doubled.  So if the K64
3904   * code is 0x042, the K32 codes would be 0x044 and 0x045.
3905   */
3906  #ifdef __LP64__
3907  void
3908  trace_resource_violation(uint16_t code,
3909      struct ledger_entry_info *linfo)
3910  {
3911  	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, code),
3912  	    linfo->lei_balance, linfo->lei_last_refill,
3913  	    linfo->lei_limit, linfo->lei_refill_period);
3914  }
3915  #else /* K32 */
3916  /* TODO: create/find a trace_two_LLs() for K32 systems */
3917  #define MASK32 0xffffffff
3918  void
3919  trace_resource_violation(uint16_t code,
3920      struct ledger_entry_info *linfo)
3921  {
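      	/*
      	 * Doubling the low two bits maps 0,1,2,3 to 0,2,4,6, so each K64
      	 * code's K32 pair (codeA, codeA + 1) lands in its own slot and
      	 * cannot collide with the pair of a neighboring K64 code.
      	 */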
3922  	int8_t lownibble = (code & 0x3) * 2;
3923  	int16_t codeA = (code & 0xffc) | lownibble;
3924  	int16_t codeB = codeA + 1;
3925  
3926  	int32_t balance_high = (linfo->lei_balance >> 32) & MASK32;
3927  	int32_t balance_low = linfo->lei_balance & MASK32;
3928  	int32_t last_refill_high = (linfo->lei_last_refill >> 32) & MASK32;
3929  	int32_t last_refill_low = linfo->lei_last_refill & MASK32;
3930  
3931  	int32_t limit_high = (linfo->lei_limit >> 32) & MASK32;
3932  	int32_t limit_low = linfo->lei_limit & MASK32;
3933  	int32_t refill_period_high = (linfo->lei_refill_period >> 32) & MASK32;
3934  	int32_t refill_period_low = linfo->lei_refill_period & MASK32;
3935  
3936  	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeA),
3937  	    balance_high, balance_low,
3938  	    last_refill_high, last_refill_low);
3939  	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeB),
3940  	    limit_high, limit_low,
3941  	    refill_period_high, refill_period_low);
3942  }
3943  #endif /* K64/K32 */