/* task.h */
/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	task.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for tasks.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
89 */ 90 91 #ifndef _KERN_TASK_H_ 92 #define _KERN_TASK_H_ 93 94 #include <kern/kern_types.h> 95 #include <mach/mach_types.h> 96 #include <sys/cdefs.h> 97 98 #ifdef XNU_KERNEL_PRIVATE 99 #include <kern/kern_cdata.h> 100 #include <mach/sfi_class.h> 101 #include <kern/counter.h> 102 #include <kern/queue.h> 103 #include <sys/kern_sysctl.h> 104 #endif /* XNU_KERNEL_PRIVATE */ 105 106 #ifdef MACH_KERNEL_PRIVATE 107 108 #include <mach/boolean.h> 109 #include <mach/port.h> 110 #include <mach/time_value.h> 111 #include <mach/message.h> 112 #include <mach/mach_param.h> 113 #include <mach/task_info.h> 114 #include <mach/exception_types.h> 115 #include <mach/vm_statistics.h> 116 #include <machine/task.h> 117 118 #if MONOTONIC 119 #include <machine/monotonic.h> 120 #endif /* MONOTONIC */ 121 122 #include <kern/cpu_data.h> 123 #include <kern/queue.h> 124 #include <kern/exception.h> 125 #include <kern/locks.h> 126 #include <security/_label.h> 127 #include <ipc/ipc_port.h> 128 129 #include <kern/thread.h> 130 #include <mach/coalition.h> 131 #include <stdatomic.h> 132 #include <os/refcnt.h> 133 134 struct _cpu_time_qos_stats { 135 uint64_t cpu_time_qos_default; 136 uint64_t cpu_time_qos_maintenance; 137 uint64_t cpu_time_qos_background; 138 uint64_t cpu_time_qos_utility; 139 uint64_t cpu_time_qos_legacy; 140 uint64_t cpu_time_qos_user_initiated; 141 uint64_t cpu_time_qos_user_interactive; 142 }; 143 144 struct task_writes_counters { 145 uint64_t task_immediate_writes; 146 uint64_t task_deferred_writes; 147 uint64_t task_invalidated_writes; 148 uint64_t task_metadata_writes; 149 }; 150 151 struct task_watchports; 152 #include <bank/bank_internal.h> 153 154 struct task { 155 /* Synchronization/destruction information */ 156 decl_lck_mtx_data(, lock); /* Task's lock */ 157 os_refcnt_t ref_count; /* Number of references to me */ 158 bool active; /* Task has not been terminated */ 159 bool ipc_active; /* IPC with the task ports is allowed */ 160 bool halting; /* Task is being halted */ 
161 bool message_app_suspended; /* Let iokit know when pidsuspended */ 162 163 /* Virtual timers */ 164 uint32_t vtimers; 165 166 /* Miscellaneous */ 167 vm_map_t XNU_PTRAUTH_SIGNED_PTR("task.map") map; /* Address space description */ 168 queue_chain_t tasks; /* global list of tasks */ 169 struct task_watchports *watchports; /* watchports passed in spawn */ 170 turnstile_inheritor_t returnwait_inheritor; /* inheritor for task_wait */ 171 172 #if defined(CONFIG_SCHED_MULTIQ) 173 sched_group_t sched_group; 174 #endif /* defined(CONFIG_SCHED_MULTIQ) */ 175 176 /* Threads in this task */ 177 queue_head_t threads; 178 struct restartable_ranges *restartable_ranges; 179 180 processor_set_t pset_hint; 181 struct affinity_space *affinity_space; 182 183 int thread_count; 184 uint32_t active_thread_count; 185 int suspend_count; /* Internal scheduling only */ 186 187 /* User-visible scheduling information */ 188 integer_t user_stop_count; /* outstanding stops */ 189 integer_t legacy_stop_count; /* outstanding legacy stops */ 190 191 int16_t priority; /* base priority for threads */ 192 int16_t max_priority; /* maximum priority for threads */ 193 194 integer_t importance; /* priority offset (BSD 'nice' value) */ 195 196 /* Task security and audit tokens */ 197 security_token_t sec_token; 198 audit_token_t audit_token; 199 200 /* Statistics */ 201 uint64_t total_user_time; /* terminated threads only */ 202 uint64_t total_system_time; 203 uint64_t total_ptime; 204 uint64_t total_runnable_time; 205 206 /* IPC structures */ 207 decl_lck_mtx_data(, itk_lock_data); 208 /* 209 * Different flavors of task port. 
210 * These flavors TASK_FLAVOR_* are defined in mach_types.h 211 */ 212 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_ports") itk_task_ports[TASK_SELF_PORT_COUNT]; 213 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_settable_self") itk_settable_self; /* a send right */ 214 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_self") itk_self; /* immovable/pinned task port, does not hold right */ 215 struct exception_action exc_actions[EXC_TYPES_COUNT]; 216 /* a send right each valid element */ 217 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_host") itk_host; /* a send right */ 218 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_bootstrap") itk_bootstrap; /* a send right */ 219 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_seatbelt") itk_seatbelt; /* a send right */ 220 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_gssd") itk_gssd; /* yet another send right */ 221 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_debug_control") itk_debug_control; /* send right for debugmode communications */ 222 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_access") itk_task_access; /* and another send right */ 223 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resume") itk_resume; /* a receive right to resume this task */ 224 struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_registered") itk_registered[TASK_PORT_REGISTER_MAX]; 225 /* all send rights */ 226 ipc_port_t * XNU_PTRAUTH_SIGNED_PTR("task.itk_dyld_notify") itk_dyld_notify; /* lazy send rights array of size DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT */ 227 228 struct ipc_space * XNU_PTRAUTH_SIGNED_PTR("task.itk_space") itk_space; 229 230 ledger_t ledger; 231 /* Synchronizer ownership information */ 232 queue_head_t semaphore_list; /* list of owned semaphores */ 233 int semaphores_owned; /* number of semaphores owned */ 234 235 unsigned int priv_flags; /* privilege resource flags */ 236 #define VM_BACKING_STORE_PRIV 0x1 237 238 MACHINE_TASK 239 240 counter_t faults; /* faults counter */ 
241 integer_t decompressions; /* decompression counter */ 242 integer_t pageins; /* pageins counter */ 243 integer_t cow_faults; /* copy on write fault counter */ 244 integer_t messages_sent; /* messages sent counter */ 245 integer_t messages_received; /* messages received counter */ 246 integer_t syscalls_mach; /* mach system call counter */ 247 integer_t syscalls_unix; /* unix system call counter */ 248 uint32_t c_switch; /* total context switches */ 249 uint32_t p_switch; /* total processor switches */ 250 uint32_t ps_switch; /* total pset switches */ 251 252 #ifdef MACH_BSD 253 void * XNU_PTRAUTH_SIGNED_PTR("task.bsd_info") bsd_info; 254 #endif 255 kcdata_descriptor_t corpse_info; 256 uint64_t crashed_thread_id; 257 queue_chain_t corpse_tasks; 258 #ifdef CONFIG_MACF 259 struct label * crash_label; 260 uint8_t * mach_trap_filter_mask; /* Mach trap filter bitmask (len: mach_trap_count bits) */ 261 uint8_t * mach_kobj_filter_mask; /* Mach kobject filter bitmask (len: mach_kobj_count bits) */ 262 #endif 263 struct vm_shared_region *shared_region; 264 #if __has_feature(ptrauth_calls) 265 char *shared_region_id; /* determines which ptr auth key to use */ 266 bool shared_region_auth_remapped; /* authenticated sections ready for use */ 267 #endif /* __has_feature(ptrauth_calls) */ 268 volatile uint32_t t_flags; /* general-purpose task flags protected by task_lock (TL) */ 269 #define TF_NONE 0 270 #define TF_64B_ADDR 0x00000001 /* task has 64-bit addressing */ 271 #define TF_64B_DATA 0x00000002 /* task has 64-bit data registers */ 272 #define TF_CPUMON_WARNING 0x00000004 /* task has at least one thread in CPU usage warning zone */ 273 #define TF_WAKEMON_WARNING 0x00000008 /* task is in wakeups monitor warning zone */ 274 #define TF_TELEMETRY (TF_CPUMON_WARNING | TF_WAKEMON_WARNING) /* task is a telemetry participant */ 275 #define TF_GPU_DENIED 0x00000010 /* task is not allowed to access the GPU */ 276 #define TF_CORPSE 0x00000020 /* task is a corpse */ 277 #define 
TF_PENDING_CORPSE 0x00000040 /* task corpse has not been reported yet */ 278 #define TF_CORPSE_FORK 0x00000080 /* task is a forked corpse */ 279 #define TF_PLATFORM 0x00000400 /* task is a platform binary */ 280 #define TF_CA_CLIENT_WI 0x00000800 /* task has CA_CLIENT work interval */ 281 #define TF_DARKWAKE_MODE 0x00001000 /* task is in darkwake mode */ 282 #define TF_NO_SMT 0x00002000 /* task threads must not be paired with SMT threads */ 283 #define TF_FILTER_MSG 0x00004000 /* task calls into message filter callback before sending a message */ 284 #define TF_SYS_VERSION_COMPAT 0x00008000 /* shim task accesses to OS version data (macOS - app compatibility) */ 285 #define TF_PAC_EXC_FATAL 0x00010000 /* task is marked a corpse if a PAC exception occurs */ 286 #define TF_TECS 0x00020000 /* task threads must enable CPU security */ 287 #if defined(__x86_64__) 288 #define TF_INSN_COPY_OPTOUT 0x00040000 /* task threads opt out of unhandled-fault instruction stream collection */ 289 #endif 290 #define TF_COALITION_MEMBER 0x00080000 /* task is a member of a coalition */ 291 292 /* 293 * Task is running within a 64-bit address space. 294 */ 295 #define task_has_64Bit_addr(task) \ 296 (((task)->t_flags & TF_64B_ADDR) != 0) 297 #define task_set_64Bit_addr(task) \ 298 ((task)->t_flags |= TF_64B_ADDR) 299 #define task_clear_64Bit_addr(task) \ 300 ((task)->t_flags &= ~TF_64B_ADDR) 301 302 /* 303 * Task is using 64-bit machine state. 
304 */ 305 #define task_has_64Bit_data(task) \ 306 (((task)->t_flags & TF_64B_DATA) != 0) 307 #define task_set_64Bit_data(task) \ 308 ((task)->t_flags |= TF_64B_DATA) 309 #define task_clear_64Bit_data(task) \ 310 ((task)->t_flags &= ~TF_64B_DATA) 311 312 #define task_is_a_corpse(task) \ 313 (((task)->t_flags & TF_CORPSE) != 0) 314 315 #define task_set_corpse(task) \ 316 ((task)->t_flags |= TF_CORPSE) 317 318 #define task_corpse_pending_report(task) \ 319 (((task)->t_flags & TF_PENDING_CORPSE) != 0) 320 321 #define task_set_corpse_pending_report(task) \ 322 ((task)->t_flags |= TF_PENDING_CORPSE) 323 324 #define task_clear_corpse_pending_report(task) \ 325 ((task)->t_flags &= ~TF_PENDING_CORPSE) 326 327 #define task_is_a_corpse_fork(task) \ 328 (((task)->t_flags & TF_CORPSE_FORK) != 0) 329 330 #define task_set_coalition_member(task) \ 331 ((task)->t_flags |= TF_COALITION_MEMBER) 332 333 #define task_clear_coalition_member(task) \ 334 ((task)->t_flags &= ~TF_COALITION_MEMBER) 335 336 #define task_is_coalition_member(task) \ 337 (((task)->t_flags & TF_COALITION_MEMBER) != 0) 338 339 uint32_t t_procflags; /* general-purpose task flags protected by proc_lock (PL) */ 340 #define TPF_NONE 0 341 #define TPF_DID_EXEC 0x00000001 /* task has been execed to a new task */ 342 #define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */ 343 #ifdef CONFIG_32BIT_TELEMETRY 344 #define TPF_LOG_32BIT_TELEMETRY 0x00000004 /* task should log identifying information */ 345 #endif 346 347 #define task_did_exec_internal(task) \ 348 (((task)->t_procflags & TPF_DID_EXEC) != 0) 349 350 #define task_is_exec_copy_internal(task) \ 351 (((task)->t_procflags & TPF_EXEC_COPY) != 0) 352 353 uint8_t t_returnwaitflags; 354 #define TWF_NONE 0 355 #define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */ 356 #define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */ 357 358 mach_vm_address_t all_image_info_addr; /* dyld 
__all_image_info */ 359 mach_vm_size_t all_image_info_size; /* section location and size */ 360 361 #if KPC 362 #define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_kpc" signifying this task forced all counters */ 363 uint32_t t_kpc; /* kpc flags */ 364 #endif /* KPC */ 365 366 boolean_t pidsuspended; /* pid_suspend called; no threads can execute */ 367 boolean_t frozen; /* frozen; private resident pages committed to swap */ 368 boolean_t changing_freeze_state; /* in the process of freezing or thawing */ 369 uint16_t policy_ru_cpu :4, 370 policy_ru_cpu_ext :4, 371 applied_ru_cpu :4, 372 applied_ru_cpu_ext :4; 373 uint8_t rusage_cpu_flags; 374 uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */ 375 uint8_t rusage_cpu_perthr_percentage; /* Per-thread CPU limit percentage */ 376 #if MACH_ASSERT 377 int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */ 378 #endif 379 uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */ 380 uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */ 381 uint64_t rusage_cpu_deadline; 382 thread_call_t rusage_cpu_callt; 383 #if CONFIG_TASKWATCH 384 queue_head_t task_watchers; /* app state watcher threads */ 385 int num_taskwatchers; 386 int watchapplying; 387 #endif /* CONFIG_TASKWATCH */ 388 389 struct bank_task *bank_context; /* pointer to per task bank structure */ 390 391 #if IMPORTANCE_INHERITANCE 392 struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */ 393 #endif /* IMPORTANCE_INHERITANCE */ 394 395 vm_extmod_statistics_data_t extmod_statistics; 396 397 struct task_requested_policy requested_policy; 398 struct task_effective_policy effective_policy; 399 400 /* 401 * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away. 
402 */ 403 uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */ 404 low_mem_notified_critical :1, /* critical low memory notification is sent to the task */ 405 purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */ 406 purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */ 407 low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */ 408 mem_notify_reserved :27; /* reserved for future use */ 409 410 uint32_t memlimit_is_active :1, /* if set, use active attributes, otherwise use inactive attributes */ 411 memlimit_is_fatal :1, /* if set, exceeding current memlimit will prove fatal to the task */ 412 memlimit_active_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds active memory limit */ 413 memlimit_inactive_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds inactive memory limit */ 414 memlimit_attrs_reserved :28; /* reserved for future use */ 415 416 io_stat_info_t task_io_stats; 417 418 struct task_writes_counters task_writes_counters_internal; 419 struct task_writes_counters task_writes_counters_external; 420 421 /* 422 * The cpu_time_qos_stats fields are protected by the task lock 423 */ 424 struct _cpu_time_qos_stats cpu_time_eqos_stats; 425 struct _cpu_time_qos_stats cpu_time_rqos_stats; 426 427 /* Statistics accumulated for terminated threads from this task */ 428 uint32_t task_timer_wakeups_bin_1; 429 uint32_t task_timer_wakeups_bin_2; 430 uint64_t task_gpu_ns; 431 uint64_t task_energy; 432 433 #if MONOTONIC 434 /* Read and written under task_lock */ 435 struct mt_task task_monotonic; 436 #endif /* MONOTONIC */ 437 438 uint8_t task_can_transfer_memory_ownership; 439 uint8_t task_objects_disowning; 440 uint8_t task_objects_disowned; 441 /* # of purgeable volatile VM objects owned by this task: */ 442 int 
task_volatile_objects; 443 /* # of purgeable but not volatile VM objects owned by this task: */ 444 int task_nonvolatile_objects; 445 int task_owned_objects; 446 queue_head_t task_objq; 447 decl_lck_mtx_data(, task_objq_lock); /* protects "task_objq" */ 448 449 unsigned int task_thread_limit:16; 450 #if __arm64__ 451 unsigned int task_legacy_footprint:1; 452 unsigned int task_extra_footprint_limit:1; 453 unsigned int task_ios13extended_footprint_limit:1; 454 #endif /* __arm64__ */ 455 unsigned int task_region_footprint:1; 456 unsigned int task_has_crossed_thread_limit:1; 457 uint32_t exec_token; 458 /* 459 * A task's coalition set is "adopted" in task_create_internal 460 * and unset in task_deallocate_internal, so each array member 461 * can be referenced without the task lock. 462 * Note: these fields are protected by coalition->lock, 463 * not the task lock. 464 */ 465 coalition_t coalition[COALITION_NUM_TYPES]; 466 queue_chain_t task_coalition[COALITION_NUM_TYPES]; 467 uint64_t dispatchqueue_offset; 468 469 #if DEVELOPMENT || DEBUG 470 boolean_t task_unnested; 471 int task_disconnected_count; 472 #endif 473 474 #if HYPERVISOR 475 void * XNU_PTRAUTH_SIGNED_PTR("task.hv_task_target") hv_task_target; /* hypervisor virtual machine object associated with this task */ 476 #endif /* HYPERVISOR */ 477 478 #if CONFIG_SECLUDED_MEMORY 479 uint8_t task_can_use_secluded_mem; 480 uint8_t task_could_use_secluded_mem; 481 uint8_t task_could_also_use_secluded_mem; 482 uint8_t task_suppressed_secluded; 483 #endif /* CONFIG_SECLUDED_MEMORY */ 484 485 task_exc_guard_behavior_t task_exc_guard; 486 487 queue_head_t io_user_clients; 488 489 mach_vm_address_t mach_header_vm_address; 490 491 uint32_t loadTag; /* dext ID used for logging identity */ 492 #if CONFIG_FREEZE 493 queue_head_t task_frozen_cseg_q; /* queue of csegs frozen to NAND */ 494 #endif /* CONFIG_FREEZE */ 495 #if CONFIG_PHYS_WRITE_ACCT 496 uint64_t task_fs_metadata_writes; 497 #endif /* CONFIG_PHYS_WRITE_ACCT */ 498 
uint32_t task_shared_region_slide; /* cached here to avoid locking during telemetry */ 499 uuid_t task_shared_region_uuid; 500 }; 501 502 /* 503 * EXC_GUARD default delivery behavior for optional Mach port and VM guards. 504 * Applied to new tasks at creation time. 505 */ 506 extern task_exc_guard_behavior_t task_exc_guard_default; 507 508 extern kern_return_t 509 task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *); 510 511 static inline void 512 task_require(struct task *task) 513 { 514 zone_id_require(ZONE_ID_TASK, sizeof(struct task), task); 515 } 516 517 #define task_lock(task) lck_mtx_lock(&(task)->lock) 518 #define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED) 519 #define task_lock_try(task) lck_mtx_try_lock(&(task)->lock) 520 #define task_unlock(task) lck_mtx_unlock(&(task)->lock) 521 522 #define task_objq_lock_init(task) lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr) 523 #define task_objq_lock_destroy(task) lck_mtx_destroy(&(task)->task_objq_lock, &vm_object_lck_grp) 524 #define task_objq_lock(task) lck_mtx_lock(&(task)->task_objq_lock) 525 #define task_objq_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED) 526 #define task_objq_lock_try(task) lck_mtx_try_lock(&(task)->task_objq_lock) 527 #define task_objq_unlock(task) lck_mtx_unlock(&(task)->task_objq_lock) 528 529 #define itk_lock_init(task) lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr) 530 #define itk_lock_destroy(task) lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp) 531 #define itk_lock(task) lck_mtx_lock(&(task)->itk_lock_data) 532 #define itk_unlock(task) lck_mtx_unlock(&(task)->itk_lock_data) 533 534 /* task clear return wait flags */ 535 #define TCRW_CLEAR_INITIAL_WAIT 0x1 536 #define TCRW_CLEAR_FINAL_WAIT 0x2 537 #define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT) 538 539 #define TASK_REFERENCE_LEAK_DEBUG 0 540 541 #if 
TASK_REFERENCE_LEAK_DEBUG 542 extern void task_reference_internal(task_t task); 543 extern os_ref_count_t task_deallocate_internal(task_t task); 544 #else 545 #define task_reference_internal(task) \ 546 MACRO_BEGIN \ 547 task_require(task); \ 548 os_ref_retain(&(task)->ref_count); \ 549 MACRO_END 550 #define task_deallocate_internal(task) os_ref_release(&(task)->ref_count) 551 #endif 552 553 #define task_reference(task) \ 554 MACRO_BEGIN \ 555 if ((task) != TASK_NULL) \ 556 task_reference_internal(task); \ 557 MACRO_END 558 559 extern kern_return_t kernel_task_create( 560 task_t task, 561 vm_offset_t map_base, 562 vm_size_t map_size, 563 task_t *child); 564 565 /* Initialize task module */ 566 extern void task_init(void); 567 568 /* coalition_init() calls this to initialize ledgers before task_init() */ 569 extern void init_task_ledgers(void); 570 571 #define current_task_fast() (current_thread()->task) 572 #define current_task() current_task_fast() 573 574 extern bool task_is_driver(task_t task); 575 576 extern lck_attr_t task_lck_attr; 577 extern lck_grp_t task_lck_grp; 578 579 struct task_watchport_elem { 580 task_t twe_task; 581 ipc_port_t twe_port; /* (Space lock) */ 582 }; 583 584 struct task_watchports { 585 os_refcnt_t tw_refcount; /* (Space lock) */ 586 task_t tw_task; /* (Space lock) & tw_refcount == 0 */ 587 thread_t tw_thread; /* (Space lock) & tw_refcount == 0 */ 588 uint32_t tw_elem_array_count; /* (Space lock) */ 589 struct task_watchport_elem tw_elem[]; /* (Space lock) & (Portlock) & (mq lock) */ 590 }; 591 592 #define task_watchports_retain(x) (os_ref_retain(&(x)->tw_refcount)) 593 #define task_watchports_release(x) (os_ref_release(&(x)->tw_refcount)) 594 595 #define task_watchport_elem_init(elem, task, port) \ 596 do { \ 597 (elem)->twe_task = (task); \ 598 (elem)->twe_port = (port); \ 599 } while(0) 600 601 #define task_watchport_elem_clear(elem) task_watchport_elem_init((elem), NULL, NULL) 602 603 extern void 604 task_add_turnstile_watchports( 
605 task_t task, 606 thread_t thread, 607 ipc_port_t *portwatch_ports, 608 uint32_t portwatch_count); 609 610 extern void 611 task_watchport_elem_deallocate( 612 struct task_watchport_elem *watchport_elem); 613 614 extern boolean_t 615 task_has_watchports(task_t task); 616 617 void 618 task_dyld_process_info_update_helper( 619 task_t task, 620 size_t active_count, 621 vm_map_address_t magic_addr, 622 ipc_port_t *release_ports, 623 size_t release_count); 624 625 #else /* MACH_KERNEL_PRIVATE */ 626 627 __BEGIN_DECLS 628 629 extern task_t current_task(void); 630 631 extern void task_reference(task_t task); 632 extern bool task_is_driver(task_t task); 633 634 #define TF_NONE 0 635 636 #define TWF_NONE 0 637 #define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */ 638 #define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */ 639 640 /* task clear return wait flags */ 641 #define TCRW_CLEAR_INITIAL_WAIT 0x1 642 #define TCRW_CLEAR_FINAL_WAIT 0x2 643 #define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT) 644 645 646 #define TPF_NONE 0 647 #define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */ 648 649 650 __END_DECLS 651 652 #endif /* MACH_KERNEL_PRIVATE */ 653 654 __BEGIN_DECLS 655 656 #ifdef KERNEL_PRIVATE 657 extern boolean_t task_is_app_suspended(task_t task); 658 extern bool task_is_exotic(task_t task); 659 extern bool task_is_alien(task_t task); 660 #endif 661 662 #ifdef XNU_KERNEL_PRIVATE 663 664 /* Hold all threads in a task */ 665 extern kern_return_t task_hold( 666 task_t task); 667 668 /* Wait for task to stop running, either just to get off CPU or to cease being runnable */ 669 extern kern_return_t task_wait( 670 task_t task, 671 boolean_t until_not_runnable); 672 673 /* Release hold on all threads in a task */ 674 extern kern_return_t task_release( 675 task_t task); 676 677 /* Suspend/resume a task where the kernel owns the suspend count */ 678 extern 
kern_return_t task_suspend_internal( task_t task); 679 extern kern_return_t task_resume_internal( task_t task); 680 681 /* Suspends a task by placing a hold on its threads */ 682 extern kern_return_t task_pidsuspend( 683 task_t task); 684 685 /* Resumes a previously paused task */ 686 extern kern_return_t task_pidresume( 687 task_t task); 688 689 extern kern_return_t task_send_trace_memory( 690 task_t task, 691 uint32_t pid, 692 uint64_t uniqueid); 693 694 extern void task_remove_turnstile_watchports( 695 task_t task); 696 697 extern void task_transfer_turnstile_watchports( 698 task_t old_task, 699 task_t new_task, 700 thread_t new_thread); 701 702 #if DEVELOPMENT || DEBUG 703 704 extern kern_return_t task_disconnect_page_mappings( 705 task_t task); 706 #endif 707 708 extern void tasks_system_suspend(boolean_t suspend); 709 710 #if CONFIG_FREEZE 711 712 /* Freeze a task's resident pages */ 713 extern kern_return_t task_freeze( 714 task_t task, 715 uint32_t *purgeable_count, 716 uint32_t *wired_count, 717 uint32_t *clean_count, 718 uint32_t *dirty_count, 719 uint32_t dirty_budget, 720 uint32_t *shared_count, 721 int *freezer_error_code, 722 boolean_t eval_only); 723 724 /* Thaw a currently frozen task */ 725 extern kern_return_t task_thaw( 726 task_t task); 727 728 typedef enum { 729 CREDIT_TO_SWAP = 1, 730 DEBIT_FROM_SWAP = 2 731 } freezer_acct_op_t; 732 733 extern void task_update_frozen_to_swap_acct( 734 task_t task, 735 int64_t amount, 736 freezer_acct_op_t op); 737 738 #endif /* CONFIG_FREEZE */ 739 740 /* Halt all other threads in the current task */ 741 extern kern_return_t task_start_halt( 742 task_t task); 743 744 /* Wait for other threads to halt and free halting task resources */ 745 extern void task_complete_halt( 746 task_t task); 747 748 extern kern_return_t task_terminate_internal( 749 task_t task); 750 751 extern kern_return_t task_create_internal( 752 task_t parent_task, 753 coalition_t *parent_coalitions, 754 boolean_t inherit_memory, 755 boolean_t 
is_64bit, 756 boolean_t is_64bit_data, 757 uint32_t flags, 758 uint32_t procflags, 759 uint8_t t_returnwaitflags, 760 task_t *child_task); /* OUT */ 761 762 extern kern_return_t task_set_special_port_internal( 763 task_t task, 764 int which, 765 ipc_port_t port); 766 767 extern kern_return_t task_info( 768 task_t task, 769 task_flavor_t flavor, 770 task_info_t task_info_out, 771 mach_msg_type_number_t *task_info_count); 772 773 extern void task_power_info_locked( 774 task_t task, 775 task_power_info_t info, 776 gpu_energy_data_t gpu_energy, 777 task_power_info_v2_t infov2, 778 uint64_t *runnable_time); 779 780 extern uint64_t task_gpu_utilisation( 781 task_t task); 782 783 extern uint64_t task_energy( 784 task_t task); 785 786 extern uint64_t task_cpu_ptime( 787 task_t task); 788 extern void task_update_cpu_time_qos_stats( 789 task_t task, 790 uint64_t *eqos_stats, 791 uint64_t *rqos_stats); 792 793 extern void task_vtimer_set( 794 task_t task, 795 integer_t which); 796 797 extern void task_vtimer_clear( 798 task_t task, 799 integer_t which); 800 801 extern void task_vtimer_update( 802 task_t task, 803 integer_t which, 804 uint32_t *microsecs); 805 806 #define TASK_VTIMER_USER 0x01 807 #define TASK_VTIMER_PROF 0x02 808 #define TASK_VTIMER_RLIM 0x04 809 810 extern void task_set_64bit( 811 task_t task, 812 boolean_t is_64bit, 813 boolean_t is_64bit_data); 814 815 extern boolean_t task_get_64bit_data( 816 task_t task); 817 818 extern void task_set_platform_binary( 819 task_t task, 820 boolean_t is_platform); 821 extern bool task_set_ca_client_wi( 822 task_t task, 823 boolean_t ca_client_wi); 824 825 extern void task_set_dyld_info( 826 task_t task, 827 mach_vm_address_t addr, 828 mach_vm_size_t size); 829 830 extern void task_set_mach_header_address( 831 task_t task, 832 mach_vm_address_t addr); 833 834 /* Get number of activations in a task */ 835 extern int get_task_numacts( 836 task_t task); 837 838 struct label; 839 extern kern_return_t task_collect_crash_info( 840 
task_t task, 841 #if CONFIG_MACF 842 struct label *crash_label, 843 #endif 844 int is_corpse_fork); 845 void task_port_notify(mach_msg_header_t *msg); 846 void task_port_with_flavor_notify(mach_msg_header_t *msg); 847 void task_wait_till_threads_terminate_locked(task_t task); 848 849 /* JMM - should just be temporary (implementation in bsd_kern still) */ 850 extern void set_bsdtask_info(task_t, void *); 851 extern uint32_t set_task_loadTag(task_t task, uint32_t loadTag); 852 extern vm_map_t get_task_map_reference(task_t); 853 extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t); 854 extern pmap_t get_task_pmap(task_t); 855 extern uint64_t get_task_resident_size(task_t); 856 extern uint64_t get_task_compressed(task_t); 857 extern uint64_t get_task_resident_max(task_t); 858 extern uint64_t get_task_phys_footprint(task_t); 859 #if CONFIG_LEDGER_INTERVAL_MAX 860 extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset); 861 #endif /* CONFIG_FOOTPRINT_INTERVAL_MAX */ 862 extern uint64_t get_task_phys_footprint_lifetime_max(task_t); 863 extern uint64_t get_task_phys_footprint_limit(task_t); 864 extern uint64_t get_task_purgeable_size(task_t); 865 extern uint64_t get_task_cpu_time(task_t); 866 extern uint64_t get_task_dispatchqueue_offset(task_t); 867 extern uint64_t get_task_dispatchqueue_serialno_offset(task_t); 868 extern uint64_t get_task_dispatchqueue_label_offset(task_t); 869 extern uint64_t get_task_uniqueid(task_t task); 870 extern int get_task_version(task_t task); 871 872 extern uint64_t get_task_internal(task_t); 873 extern uint64_t get_task_internal_compressed(task_t); 874 extern uint64_t get_task_purgeable_nonvolatile(task_t); 875 extern uint64_t get_task_purgeable_nonvolatile_compressed(task_t); 876 extern uint64_t get_task_iokit_mapped(task_t); 877 extern uint64_t get_task_alternate_accounting(task_t); 878 extern uint64_t get_task_alternate_accounting_compressed(task_t); 879 extern uint64_t get_task_memory_region_count(task_t); 880 extern 
uint64_t get_task_page_table(task_t);
#if CONFIG_FREEZE
extern uint64_t get_task_frozen_to_swap(task_t);
#endif /* CONFIG_FREEZE */
extern uint64_t get_task_network_nonvolatile(task_t);
extern uint64_t get_task_network_nonvolatile_compressed(task_t);
extern uint64_t get_task_wired_mem(task_t);
extern uint32_t get_task_loadTag(task_t task);

/*
 * Tag-specific footprint accessors; each pairs with a *_compressed
 * counterpart and matches a ledger entry of the same name below.
 */
extern uint64_t get_task_tagged_footprint(task_t task);
extern uint64_t get_task_tagged_footprint_compressed(task_t task);
extern uint64_t get_task_media_footprint(task_t task);
extern uint64_t get_task_media_footprint_compressed(task_t task);
extern uint64_t get_task_graphics_footprint(task_t task);
extern uint64_t get_task_graphics_footprint_compressed(task_t task);
extern uint64_t get_task_neural_footprint(task_t task);
extern uint64_t get_task_neural_footprint_compressed(task_t task);

/* Physical footprint limit control; limit values appear to be in MB (see limit_mb). */
extern kern_return_t task_convert_phys_footprint_limit(int, int *);
extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t);
extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb);

/* Jetsam memlimit attributes */
extern boolean_t task_get_memlimit_is_active(task_t task);
extern boolean_t task_get_memlimit_is_fatal(task_t task);
extern void task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active);
extern void task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal);
/* Track whether an EXC_RESOURCE exception was already raised for the active/inactive limit. */
extern boolean_t task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);
extern void task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);

extern void task_set_thread_limit(task_t task, uint16_t thread_limit);

#if XNU_TARGET_OS_OSX
extern boolean_t task_has_system_version_compat_enabled(task_t task);
extern void task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat);
#endif /* XNU_TARGET_OS_OSX */

extern
boolean_t is_kerneltask(task_t task);
extern boolean_t is_corpsetask(task_t task);

extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);

/* Machine-dependent task state accessors (per-architecture implementations). */
extern kern_return_t machine_task_get_state(
	task_t task,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *state_count);

extern kern_return_t machine_task_set_state(
	task_t task,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count);

extern void machine_task_terminate(task_t task);

/*
 * Indices of the per-task ledger entries; see the global 'task_ledgers'
 * instance declared after this struct.  Many entries come in
 * footprint/nofootprint and plain/compressed pairs mirroring the
 * get_task_* accessors above.
 */
struct _task_ledger_indices {
	int cpu_time;
	int tkm_private;
	int tkm_shared;
	int phys_mem;
	int wired_mem;
	int internal;
	int iokit_mapped;
	int alternate_accounting;
	int alternate_accounting_compressed;
	int page_table;
	int phys_footprint;
	int internal_compressed;
	int purgeable_volatile;
	int purgeable_nonvolatile;
	int purgeable_volatile_compressed;
	int purgeable_nonvolatile_compressed;
	int tagged_nofootprint;
	int tagged_footprint;
	int tagged_nofootprint_compressed;
	int tagged_footprint_compressed;
	int network_volatile;
	int network_nonvolatile;
	int network_volatile_compressed;
	int network_nonvolatile_compressed;
	int media_nofootprint;
	int media_footprint;
	int media_nofootprint_compressed;
	int media_footprint_compressed;
	int graphics_nofootprint;
	int graphics_footprint;
	int graphics_nofootprint_compressed;
	int graphics_footprint_compressed;
	int neural_nofootprint;
	int neural_footprint;
	int neural_nofootprint_compressed;
	int neural_footprint_compressed;
	int platform_idle_wakeups;
	int interrupt_wakeups;
#if CONFIG_SCHED_SFI
	/* One wait-time entry per SFI class. */
	int sfi_wait_times[MAX_SFI_CLASS_ID];
#endif /* CONFIG_SCHED_SFI */
	int cpu_time_billed_to_me;
	int cpu_time_billed_to_others;
	int physical_writes;
	int logical_writes;
	int logical_writes_to_external;
	int energy_billed_to_me;
	int
	energy_billed_to_others;
#if DEBUG || DEVELOPMENT
	/* Page-grab counters, only accounted on DEBUG/DEVELOPMENT kernels. */
	int pages_grabbed;
	int pages_grabbed_kern;
	int pages_grabbed_iopl;
	int pages_grabbed_upl;
#endif /* DEBUG || DEVELOPMENT */
#if CONFIG_FREEZE
	int frozen_to_swap;
#endif /* CONFIG_FREEZE */
#if CONFIG_PHYS_WRITE_ACCT
	int fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
};
/* Global table of ledger indices; filled in elsewhere during ledger setup. */
extern struct _task_ledger_indices task_ledgers;

/* requires task to be unlocked, returns a referenced thread */
thread_t task_findtid(task_t task, uint64_t tid);
int pid_from_task(task_t task);

/* Resource-monitor controls (wakeups, CPU usage, I/O). */
extern kern_return_t task_wakeups_monitor_ctl(task_t task, uint32_t *rate_hz, int32_t *flags);
extern kern_return_t task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags);
extern void task_rollup_accounting_info(task_t new_task, task_t parent_task);
extern kern_return_t task_io_monitor_ctl(task_t task, uint32_t *flags);

/* exec() bookkeeping: flags distinguishing a post-exec task from its exec-copy. */
extern void task_set_did_exec_flag(task_t task);
extern void task_clear_exec_copy_flag(task_t task);
extern boolean_t task_is_exec_copy(task_t);
extern boolean_t task_did_exec(task_t task);
#ifdef CONFIG_32BIT_TELEMETRY
extern boolean_t task_consume_32bit_log_flag(task_t task);
extern void task_set_32bit_log_flag(task_t task);
#endif /* CONFIG_32BIT_TELEMETRY */
extern boolean_t task_is_active(task_t task);
extern boolean_t task_is_halting(task_t task);

/* Return-wait: holds a new task until cleared; task_wait_to_return never returns here. */
extern void task_clear_return_wait(task_t task, uint32_t flags);
extern void task_wait_to_return(void) __attribute__((noreturn));
extern event_t task_get_return_wait_event(task_t task);

extern void task_bank_reset(task_t task);
extern void task_bank_init(task_t task);

#if CONFIG_ARCADE
extern void task_prep_arcade(task_t task, thread_t thread);
#endif /* CONFIG_ARCADE */

extern int task_pid(task_t task);

#if __has_feature(ptrauth_calls)
char *task_get_vm_shared_region_id_and_jop_pid(task_t
 task, uint64_t *);
void task_set_shared_region_id(task_t task, char *id);
#endif /* __has_feature(ptrauth_calls) */

extern boolean_t task_has_assertions(task_t task);
/* End task_policy */

extern void task_set_gpu_denied(task_t task, boolean_t denied);
extern boolean_t task_is_gpu_denied(task_t task);

extern queue_head_t * task_io_user_clients(task_t task);
extern void task_set_message_app_suspended(task_t task, boolean_t enable);

extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task);

extern void task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num);

/* Mach message filtering flag and inheritance across tasks. */
extern void task_set_filter_msg_flag(task_t task, boolean_t flag);
extern boolean_t task_get_filter_msg_flag(task_t task);

/* NOTE(review): second parameter is named 'old_mask' but appears to be the source task — confirm. */
extern void task_transfer_mach_filter_bits(task_t new_task, task_t old_mask);

#if __has_feature(ptrauth_calls)
/* Whether a pointer-authentication failure is fatal for this task. */
extern bool task_is_pac_exception_fatal(task_t task);
extern void task_set_pac_exception_fatal_flag(task_t task);
#endif /*__has_feature(ptrauth_calls)*/

extern void task_set_tecs(task_t task);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE

/* BSD <-> Mach glue: retrieve the BSD proc info attached to a task/thread. */
extern void *get_bsdtask_info(task_t);
extern void *get_bsdthreadtask_info(thread_t);
extern void task_bsdtask_kill(task_t);
extern vm_map_t get_task_map(task_t);
extern ledger_t get_task_ledger(task_t);

extern boolean_t get_task_pidsuspended(task_t);
extern boolean_t get_task_suspended(task_t);
extern boolean_t get_task_frozen(task_t);

/* Convert from a task to a port */
extern ipc_port_t convert_task_to_port(task_t);
extern ipc_port_t convert_task_to_port_pinned(task_t);
extern ipc_port_t convert_task_name_to_port(task_name_t);
extern ipc_port_t convert_task_inspect_to_port(task_inspect_t);
extern ipc_port_t convert_task_read_to_port(task_read_t);
extern
ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task);

/* Convert from a port (in this case, an SO right to a task's resume port) to a task. */
extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t port);

extern boolean_t task_suspension_notify(mach_msg_header_t *);

/* Bit flags for task_update_logical_writes(); mutually combinable. */
#define TASK_WRITE_IMMEDIATE 0x1
#define TASK_WRITE_DEFERRED 0x2
#define TASK_WRITE_INVALIDATED 0x4
#define TASK_WRITE_METADATA 0x8
extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp);

/* Direction of a ledger balance adjustment for physical-write accounting. */
__enum_decl(task_balance_flags_t, uint8_t, {
	TASK_BALANCE_CREDIT = 0x1,
	TASK_BALANCE_DEBIT = 0x2,
});

__enum_decl(task_physical_write_flavor_t, uint8_t, {
	TASK_PHYSICAL_WRITE_METADATA = 0x1,
});
extern void task_update_physical_writes(task_t task, task_physical_write_flavor_t flavor,
    uint64_t io_size, task_balance_flags_t flags);

#if CONFIG_SECLUDED_MEMORY
/*
 * Secluded-memory eligibility: "can" grants use, "could"/"could also"
 * record potential eligibility (exact semantics in the implementation).
 */
extern void task_set_can_use_secluded_mem(
	task_t task,
	boolean_t can_use_secluded_mem);
extern void task_set_could_use_secluded_mem(
	task_t task,
	boolean_t could_use_secluded_mem);
extern void task_set_could_also_use_secluded_mem(
	task_t task,
	boolean_t could_also_use_secluded_mem);
extern boolean_t task_can_use_secluded_mem(
	task_t task,
	boolean_t is_allocate);
extern boolean_t task_could_use_secluded_mem(task_t task);
extern boolean_t task_could_also_use_secluded_mem(task_t task);
#endif /* CONFIG_SECLUDED_MEMORY */

extern void task_set_darkwake_mode(task_t, boolean_t);
extern boolean_t task_get_darkwake_mode(task_t);

#if __arm64__
/* arm64-only footprint limit adjustments (one-way switches; no corresponding clear calls here). */
extern void task_set_legacy_footprint(task_t task);
extern void task_set_extra_footprint_limit(task_t task);
extern void task_set_ios13extended_footprint_limit(task_t task);
#endif /* __arm64__ */

#if
 CONFIG_MACF
extern struct label *get_task_crash_label(task_t task);
#endif /* CONFIG_MACF */

/* Copy the task's code-directory hash into 'cdhash'; buffer size is the caller's contract. */
extern int get_task_cdhash(task_t task, char cdhash[]);

#endif /* KERNEL_PRIVATE */

/* The kernel's own task. */
extern task_t kernel_task;

/*
 * Reference-drop routines, one per task port flavor.  Each releases
 * a reference obtained from the corresponding convert/lookup routine.
 */
extern void task_deallocate(
	task_t task);

extern void task_name_deallocate(
	task_name_t task_name);

extern void task_policy_set_deallocate(
	task_policy_set_t task_policy_set);

extern void task_policy_get_deallocate(
	task_policy_get_t task_policy_get);

extern void task_inspect_deallocate(
	task_inspect_t task_inspect);

extern void task_read_deallocate(
	task_read_t task_read);

extern void task_suspension_token_deallocate(
	task_suspension_token_t token);

/* Footprint introspection for the current task's own regions. */
extern boolean_t task_self_region_footprint(void);
extern void task_self_region_footprint_set(boolean_t newval);
extern void task_ledgers_footprint(ledger_t ledger,
    ledger_amount_t *ledger_resident,
    ledger_amount_t *ledger_compressed);
extern void task_set_memory_ownership_transfer(
	task_t task,
	boolean_t value);


__END_DECLS

#endif /* _KERN_TASK_H_ */