/* src/launchd/src/core.c */
    1  /*
    2   * @APPLE_APACHE_LICENSE_HEADER_START@
    3   * 
    4   * Licensed under the Apache License, Version 2.0 (the "License");
    5   * you may not use this file except in compliance with the License.
    6   * You may obtain a copy of the License at
    7   * 
    8   *     http://www.apache.org/licenses/LICENSE-2.0
    9   * 
   10   * Unless required by applicable law or agreed to in writing, software
   11   * distributed under the License is distributed on an "AS IS" BASIS,
   12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   13   * See the License for the specific language governing permissions and
   14   * limitations under the License.
   15   * 
   16   * @APPLE_APACHE_LICENSE_HEADER_END@
   17   */
   18  
   19  #include "config.h"
   20  #include "core.h"
   21  #include "internal.h"
   22  #include "helper.h"
   23  
   24  #include <TargetConditionals.h>
   25  #include <mach/mach.h>
   26  #include <mach/mach_error.h>
   27  #include <mach/boolean.h>
   28  #include <mach/message.h>
   29  #include <mach/notify.h>
   30  #include <mach/mig_errors.h>
   31  #include <mach/mach_traps.h>
   32  #include <mach/mach_interface.h>
   33  #include <mach/host_info.h>
   34  #include <mach/mach_host.h>
   35  #include <mach/exception.h>
   36  #include <mach/host_reboot.h>
   37  #include <sys/types.h>
   38  #include <sys/queue.h>
   39  #include <sys/event.h>
   40  #include <sys/stat.h>
   41  #include <sys/ucred.h>
   42  #include <sys/fcntl.h>
   43  #include <sys/un.h>
   44  #include <sys/reboot.h>
   45  #include <sys/wait.h>
   46  #include <sys/sysctl.h>
   47  #include <sys/sockio.h>
   48  #include <sys/time.h>
   49  #include <sys/resource.h>
   50  #include <sys/ioctl.h>
   51  #include <sys/mount.h>
   52  #include <sys/pipe.h>
   53  #include <sys/mman.h>
   54  #include <sys/socket.h>
   55  #include <sys/syscall.h>
   56  #include <sys/kern_memorystatus.h>
   57  #include <net/if.h>
   58  #include <netinet/in.h>
   59  #include <netinet/in_var.h>
   60  #include <netinet6/nd6.h>
   61  #include <bsm/libbsm.h>
   62  #include <unistd.h>
   63  #include <signal.h>
   64  #include <errno.h>
   65  #include <libgen.h>
   66  #include <stdio.h>
   67  #include <stdlib.h>
   68  #include <stdarg.h>
   69  #include <stdbool.h>
   70  #include <paths.h>
   71  #include <pwd.h>
   72  #include <grp.h>
   73  #include <ttyent.h>
   74  #include <dlfcn.h>
   75  #include <dirent.h>
   76  #include <string.h>
   77  #include <ctype.h>
   78  #include <glob.h>
   79  #include <System/sys/spawn.h>
   80  #include <System/sys/spawn_internal.h>
   81  #include <spawn.h>
   82  #include <spawn_private.h>
   83  #include <time.h>
   84  #include <libinfo.h>
   85  #include <os/assumes.h>
   86  #include <xpc/launchd.h>
   87  #include <asl.h>
   88  #include <_simple.h>
   89  
   90  #include <libproc.h>
   91  #include <libproc_internal.h>
   92  #include <System/sys/proc_info.h>
   93  #include <malloc/malloc.h>
   94  #include <pthread.h>
   95  #if HAVE_SANDBOX
   96  #define __APPLE_API_PRIVATE
   97  #include <sandbox.h>
   98  #endif
   99  #if HAVE_QUARANTINE
  100  #include <quarantine.h>
  101  #endif
  102  #if HAVE_RESPONSIBILITY
  103  #include <responsibility.h>
  104  #endif
  105  #if !TARGET_OS_EMBEDDED
  106  extern int gL1CacheEnabled;
  107  #endif
  108  #if HAVE_SYSTEMSTATS
  109  #include <systemstats/systemstats.h>
  110  #endif
  111  
  112  #include "launch.h"
  113  #include "launch_priv.h"
  114  #include "launch_internal.h"
  115  #include "bootstrap.h"
  116  #include "bootstrap_priv.h"
  117  #include "vproc.h"
  118  #include "vproc_internal.h"
  119  
  120  #include "reboot2.h"
  121  
  122  #include "launchd.h"
  123  #include "runtime.h"
  124  #include "ipc.h"
  125  #include "job.h"
  126  #include "jobServer.h"
  127  #include "job_reply.h"
  128  #include "job_forward.h"
  129  #include "mach_excServer.h"
  130  
  131  #define POSIX_SPAWN_IOS_INTERACTIVE 0
  132  
  133  #if TARGET_OS_EMBEDDED
  134  /* Default memory highwatermark for daemons as set out in <rdar://problem/10307788>. */
  135  #define DEFAULT_JETSAM_DAEMON_HIGHWATERMARK 5
  136  #endif
  137  
  138  /* LAUNCHD_DEFAULT_EXIT_TIMEOUT
  139   *   If the job hasn't exited in the given number of seconds after sending
  140   *   it a SIGTERM, SIGKILL it. Can be overriden in the job plist.
  141   */
  142  #define LAUNCHD_MIN_JOB_RUN_TIME 10
  143  #define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
  144  #define LAUNCHD_SIGKILL_TIMER 4
  145  #define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
  146  
  147  #define SHUTDOWN_LOG_DIR "/var/log/shutdown"
  148  
  149  #define TAKE_SUBSET_NAME "TakeSubsetName"
  150  #define TAKE_SUBSET_PID "TakeSubsetPID"
  151  #define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
  152  
  153  #define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
  154  
  155  extern char **environ;
  156  
/* One Mach reply port waiting for a job's final removal. Created by
 * waiting4removal_new() and chained off the owning job (see the
 * removal_watchers list in struct job_s).
 */
struct waiting_for_removal {
	SLIST_ENTRY(waiting_for_removal) sle; // linkage in the job's removal_watchers list
	mach_port_t reply_port; // reply port to signal once the job is removed — presumably a deferred MIG reply; confirm at use site
};
  161  
  162  static bool waiting4removal_new(job_t j, mach_port_t rp);
  163  static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
  164  
/* A Mach service known to launchd's bootstrap: a service name plus the port
 * launchd hands out for it. Allocated with the name inlined after the struct
 * (trailing `name[0]` member).
 */
struct machservice {
	SLIST_ENTRY(machservice) sle; // linkage in the owning job's machservices list
	SLIST_ENTRY(machservice) special_port_sle; // linkage in the global special_ports list
	LIST_ENTRY(machservice) name_hash_sle; // linkage in a job manager's ms_hash bucket
	LIST_ENTRY(machservice) port_hash_sle; // linkage in the global port_hash table
	struct machservice *alias; // original service this entry aliases (see machservice_new_alias)
	job_t job; // job that provides this service
	unsigned int gen_num; // generation counter — presumably bumped when the port is reset; confirm in machservice_resetport()
	mach_port_name_t port; // the service's Mach port name in launchd's IPC space
	unsigned int
		isActive:1,
		reset:1,
		recv:1,
		hide:1,
		kUNCServer:1,
		per_user_hack:1,
		debug_on_close:1,
		per_pid:1,
		delete_on_destruction:1,
		drain_one_on_crash:1,
		drain_all_on_crash:1,
		upfront:1,
		event_channel:1,
		recv_race_hack :1,
		/* Don't let the size of this field get too small. It has to be large
		 * enough to represent the reasonable range of special port numbers.
		 */
		special_port_num:17;
	const char name[0]; // service name, allocated inline past the struct
};
  195  
  196  // HACK: This should be per jobmgr_t
  197  static SLIST_HEAD(, machservice) special_ports;
  198  
  199  #define PORT_HASH_SIZE 32
  200  #define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
  201  
  202  static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
  203  
  204  static void machservice_setup(launch_data_t obj, const char *key, void *context);
  205  static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
  206  static void machservice_resetport(job_t j, struct machservice *ms);
  207  static void machservice_stamp_port(job_t j, struct machservice *ms);
  208  static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
  209  static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
  210  static void machservice_ignore(job_t j, struct machservice *ms);
  211  static void machservice_watch(job_t j, struct machservice *ms);
  212  static void machservice_delete(job_t j, struct machservice *, bool port_died);
  213  static void machservice_request_notifications(struct machservice *);
  214  static mach_port_t machservice_port(struct machservice *);
  215  static job_t machservice_job(struct machservice *);
  216  static bool machservice_hidden(struct machservice *);
  217  static bool machservice_active(struct machservice *);
  218  static const char *machservice_name(struct machservice *);
  219  static bootstrap_status_t machservice_status(struct machservice *);
  220  void machservice_drain_port(struct machservice *);
  221  
/* A named group of file descriptors belonging to a job — presumably the
 * sockets from the job's configuration (see socketgroup_setup); watched and
 * ignored via kevent (socketgroup_kevent_mod).
 */
struct socketgroup {
	SLIST_ENTRY(socketgroup) sle; // linkage in the owning job's sockets list
	int *fds; // array of descriptors in this group
	unsigned int fd_cnt; // number of entries in fds
	union {
		// Two views of the same inline storage: written once at creation
		// through name_init, read-only thereafter through name.
		const char name[0];
		char name_init[0];
	};
};
  231  
  232  static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt);
  233  static void socketgroup_delete(job_t j, struct socketgroup *sg);
  234  static void socketgroup_watch(job_t j, struct socketgroup *sg);
  235  static void socketgroup_ignore(job_t j, struct socketgroup *sg);
  236  static void socketgroup_callback(job_t j);
  237  static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
  238  static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
  239  
/* One calendar-based start interval: a `struct tm` pattern plus the computed
 * next fire time. Entries live both on the owning job (sle) and on the global
 * sorted_calendar_events list (global_sle).
 */
struct calendarinterval {
	LIST_ENTRY(calendarinterval) global_sle; // linkage in sorted_calendar_events
	SLIST_ENTRY(calendarinterval) sle; // linkage in the owning job's cal_intervals list
	job_t job; // job associated with this interval
	struct tm when; // the calendar pattern to match
	time_t when_next; // absolute time of the next expected firing — confirm in calendarinterval_setalarm()
};
  247  
  248  static LIST_HEAD(, calendarinterval) sorted_calendar_events;
  249  
  250  static bool calendarinterval_new(job_t j, struct tm *w);
  251  static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
  252  static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
  253  static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
  254  static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
  255  static void calendarinterval_callback(void);
  256  static void calendarinterval_sanity_check(void);
  257  
/* One environment key/value pair for a job. The key is inlined at the end of
 * the allocation; the value is a separate heap string. Global entries are
 * distinguished by the `global` flag passed to envitem_new()/envitem_delete().
 */
struct envitem {
	SLIST_ENTRY(envitem) sle; // linkage in the job's env or global_env list
	char *value; // heap-allocated value string
	union {
		// Const and mutable views of the inline key storage.
		const char key[0];
		char key_init[0];
	};
};
  266  
  267  static bool envitem_new(job_t j, const char *k, const char *v, bool global);
  268  static void envitem_delete(job_t j, struct envitem *ei, bool global);
  269  static void envitem_setup(launch_data_t obj, const char *key, void *context);
  270  
/* One resource limit to apply to a job (see limititem_update/limititem_setup). */
struct limititem {
	SLIST_ENTRY(limititem) sle; // linkage in the owning job's limits list
	struct rlimit lim; // soft/hard limit values
	unsigned int setsoft:1, sethard:1, which:30; // which of soft/hard to apply; `which` is presumably the RLIMIT_* selector — confirm in limititem_update()
};
  276  
  277  static bool limititem_update(job_t j, int w, rlim_t r);
  278  static void limititem_delete(job_t j, struct limititem *li);
  279  static void limititem_setup(launch_data_t obj, const char *key, void *context);
  280  #if HAVE_SANDBOX
  281  static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
  282  #endif
  283  
  284  static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
  285  
/* Conditions a semaphoreitem can track to keep a job runnable (or held).
 * Several values come in opposing pairs (e.g. SUCCESSFUL_EXIT/FAILED_EXIT,
 * CRASHED/DID_NOT_CRASH) — see semaphoreitem_dict_iter_context's
 * why_true/why_false fields.
 */
typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	CRASHED,
	DID_NOT_CRASH,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
} semaphore_reason_t;
  298  
/* One keep-alive condition attached to a job. The payload string (meaning
 * depends on `why` — e.g. another job's label for the OTHER_JOB_* reasons;
 * confirm in semaphoreitem_new callers) is inlined at the end.
 */
struct semaphoreitem {
	SLIST_ENTRY(semaphoreitem) sle; // linkage in the owning job's semaphores list
	semaphore_reason_t why; // which condition this item tracks

	union {
		// Const and mutable views of the inline payload storage.
		const char what[0];
		char what_init[0];
	};
};
  308  
/* Iterator context for building semaphoreitems from a dictionary: each entry
 * presumably maps to why_true or why_false depending on its boolean value —
 * confirm in semaphoreitem_setup_dict_iter().
 */
struct semaphoreitem_dict_iter_context {
	job_t j; // job receiving the new semaphoreitems
	semaphore_reason_t why_true; // reason to record for true-valued entries
	semaphore_reason_t why_false; // reason to record for false-valued entries
};
  314  
  315  static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
  316  static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
  317  static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
  318  static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
  319  static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
  320  
/* One externally-monitored event that can drive a job. Events are linked both
 * into their eventsystem (sys_le) and into the interested job (job_le); the
 * event name is inlined at the end of the allocation.
 */
struct externalevent {
	LIST_ENTRY(externalevent) sys_le; // linkage in the owning eventsystem's events list
	LIST_ENTRY(externalevent) job_le; // linkage in the job's events list
	struct eventsystem *sys; // event system this event belongs to

	uint64_t id; // identifier for lookup within the system (see externalevent_find)
	job_t job; // job associated with this event
	bool state; // current observed state
	bool wanted_state; // state the job is interested in — TODO confirm semantics at use site
	bool internal; // NOTE(review): presumably an event launchd generated itself — confirm
	xpc_object_t event; // event payload
	xpc_object_t entitlements; // entitlement data attached to the event — confirm at use site

	char name[0]; // event name, allocated inline
};
  336  
/* Iterator context used when creating external events for a job within one
 * event system (see externalevent_setup).
 */
struct externalevent_iter_ctx {
	job_t j; // job the events are being created for
	struct eventsystem *sys; // event system receiving the events
};
  341  
  342  static bool externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags);
  343  static void externalevent_delete(struct externalevent *ee);
  344  static void externalevent_setup(launch_data_t obj, const char *key, void *context);
  345  static struct externalevent *externalevent_find(const char *sysname, uint64_t id);
  346  
/* A named collection of external events, with the system name inlined at the
 * end of the allocation.
 */
struct eventsystem {
	LIST_ENTRY(eventsystem) global_le; // linkage in the global list of event systems
	LIST_HEAD(, externalevent) events; // events registered under this system
	uint64_t curid; // id counter — presumably the next id handed to a new event; confirm in externalevent_new()
	char name[0]; // system name, allocated inline
};
  353  
  354  static struct eventsystem *eventsystem_new(const char *name);
  355  static void eventsystem_delete(struct eventsystem *sys) __attribute__((unused));
  356  static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
  357  static struct eventsystem *eventsystem_find(const char *name);
  358  static void eventsystem_ping(void);
  359  
/* A pending attach request: someone waiting (on `port`) to attach to the
 * process `dest`. Kept on a job manager's attaches list or on the global
 * _launchd_domain_waiters list; the name is inlined at the end.
 */
struct waiting4attach {
	LIST_ENTRY(waiting4attach) le; // linkage in a jobmgr's attaches list (or _launchd_domain_waiters)
	mach_port_t port; // port of the waiting party
	pid_t dest; // pid of the attach target
	xpc_service_type_t type; // XPC service type of the target
	char name[0]; // name, allocated inline
};
  367  
  368  static LIST_HEAD(, waiting4attach) _launchd_domain_waiters;
  369  
  370  static struct waiting4attach *waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type);
  371  static void waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a);
  372  static struct waiting4attach *waiting4attach_find(jobmgr_t jm, job_t j);
  373  
  374  #define ACTIVE_JOB_HASH_SIZE 32
  375  #define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))
  376  
  377  #define MACHSERVICE_HASH_SIZE	37
  378  
  379  #define LABEL_HASH_SIZE 53
/* A bootstrap namespace ("job manager"): a set of jobs plus the Mach ports
 * and bookkeeping needed to service them. Managers form a tree through
 * submgrs/parentmgr; XPC domains are also jobmgr_s instances (see
 * xpc_singleton and the _s_xpc_* globals). The manager's name is inlined at
 * the end of the allocation.
 */
struct jobmgr_s {
	kq_callback kqjobmgr_callback; // kevent dispatch callback; first member (cf. job_s, where that layout is required)
	LIST_ENTRY(jobmgr_s) xpc_le; // linkage in _s_xpc_user_domains / _s_xpc_session_domains
	SLIST_ENTRY(jobmgr_s) sle; // linkage in the parent manager's submgrs list
	SLIST_HEAD(, jobmgr_s) submgrs; // child job managers
	LIST_HEAD(, job_s) jobs; // all jobs owned by this manager
	LIST_HEAD(, waiting4attach) attaches; // pending attach requests

	/* For legacy reasons, we keep all job labels that are imported in the root
	 * job manager's label hash. If a job manager is an XPC domain, then it gets
	 * its own label hash that is separate from the "global" one stored in the
	 * root job manager.
	 */
	LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
	LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE]; // running jobs hashed by pid (see ACTIVE_JOB_HASH)
	LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE]; // Mach services hashed by name (see hash_ms)
	LIST_HEAD(, job_s) global_env_jobs; // jobs contributing to the global environment
	mach_port_t jm_port; // this manager's own port — presumably its bootstrap port; confirm in jobmgr_new()
	mach_port_t req_port; // port of the requestor that created this manager — confirm in jobmgr_new()
	jobmgr_t parentmgr; // parent manager (NULL for the root) — TODO confirm root case
	int reboot_flags; // flags forwarded to reboot(2)/host_reboot() — confirm at shutdown path
	time_t shutdown_time; // when shutdown began — confirm at use site
	unsigned int global_on_demand_cnt; // count of jobs forcing global on-demand mode
	unsigned int normal_active_cnt; // count of active non-anonymous jobs — confirm at use site
	unsigned int 
		shutting_down:1,
		session_initialized:1, 
		killed_stray_jobs:1,
		monitor_shutdown:1,
		shutdown_jobs_dirtied:1,
		shutdown_jobs_cleaned:1,
		xpc_singleton:1;
	uint32_t properties;
	// XPC-specific properties: identity and context of the requesting process.
	char owner[MAXCOMLEN];
	char *shortdesc;
	mach_port_t req_bsport;
	mach_port_t req_excport;
	mach_port_t req_asport;
	mach_port_t req_gui_asport;
	pid_t req_pid;
	uid_t req_euid;
	gid_t req_egid;
	au_asid_t req_asid;
	vm_offset_t req_ctx;
	mach_msg_type_number_t req_ctx_sz;
	mach_port_t req_rport;
	uint64_t req_uniqueid;
	kern_return_t error;
	union {
		// Const and mutable views of the inline name storage.
		const char name[0];
		char name_init[0];
	};
};
  434  
  435  // Global XPC domains.
  436  static jobmgr_t _s_xpc_system_domain;
  437  static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
  438  static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
  439  
  440  #define jobmgr_assumes(jm, e) os_assumes_ctx(jobmgr_log_bug, jm, (e))
  441  #define jobmgr_assumes_zero(jm, e) os_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
  442  #define jobmgr_assumes_zero_p(jm, e) posix_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
  443  
  444  static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
  445  static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
  446  static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
  447  static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
  448  static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
  449  static jobmgr_t jobmgr_parent(jobmgr_t jm);
  450  static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
  451  static bool jobmgr_label_test(jobmgr_t jm, const char *str);
  452  static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
  453  static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
  454  static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
  455  static void jobmgr_remove(jobmgr_t jm);
  456  static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
  457  static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
  458  static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
  459  static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
  460  static job_t managed_job(pid_t p);
  461  static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
  462  static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
  463  static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
  464  static void job_export_all2(jobmgr_t jm, launch_data_t where);
  465  static void jobmgr_callback(void *obj, struct kevent *kev);
  466  static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
  467  static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
  468  static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
  469  static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
  470  static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
  471  static void jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children);
  472  // static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
  473  static bool jobmgr_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);
  474  
  475  #define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
  476  #define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
  477  #define AUTO_PICK_XPC_LABEL (const char *)(~2)
  478  
/* One entry in a job's suspended_perusers list — presumably a per-user
 * launchd this job has suspended (cf. peruser_suspend_count in struct job_s).
 */
struct suspended_peruser {
	SLIST_ENTRY(suspended_peruser) sle; // linkage in the suspending job's list
	job_t j; // the suspended per-user job
};
  483  
/* Everything launchd knows about one job: configuration imported from the
 * job's property list, runtime state, and linkage into the various lookup
 * structures. Allocated with the label inlined at the end (trailing
 * `label[0]` member).
 */
struct job_s {
	// MUST be first element of this structure.
	kq_callback kqjob_callback;
	LIST_ENTRY(job_s) sle; // linkage in the owning job manager's jobs list
	LIST_ENTRY(job_s) subjob_sle; // linkage in the parent job's subjobs list
	LIST_ENTRY(job_s) needing_session_sle;
	LIST_ENTRY(job_s) jetsam_sle;
	LIST_ENTRY(job_s) pid_hash_sle; // linkage in the manager's active_jobs hash
	LIST_ENTRY(job_s) global_pid_hash_sle; // linkage in the global managed_actives hash
	LIST_ENTRY(job_s) label_hash_sle; // linkage in the manager's label_hash
	LIST_ENTRY(job_s) global_env_sle; // linkage in the manager's global_env_jobs list
	SLIST_ENTRY(job_s) curious_jobs_sle; // linkage in s_curious_jobs
	LIST_HEAD(, suspended_peruser) suspended_perusers;
	LIST_HEAD(, waiting_for_exit) exit_watchers;
	LIST_HEAD(, job_s) subjobs;
	LIST_HEAD(, externalevent) events; // external events this job subscribes to
	SLIST_HEAD(, socketgroup) sockets;
	SLIST_HEAD(, calendarinterval) cal_intervals;
	SLIST_HEAD(, envitem) global_env;
	SLIST_HEAD(, envitem) env;
	SLIST_HEAD(, limititem) limits;
	SLIST_HEAD(, machservice) machservices;
	SLIST_HEAD(, semaphoreitem) semaphores;
	SLIST_HEAD(, waiting_for_removal) removal_watchers;
	struct waiting4attach *w4a; // pending attach request, if any (see waiting4attach_find)
	job_t original; // for aliases: the job this one aliases — confirm in job_new_alias()
	job_t alias; // alias job pointing back at this one — confirm in job_new_alias()
	cpu_type_t *j_binpref; // preferred binary CPU types (array of j_binpref_cnt entries)
	size_t j_binpref_cnt;
	mach_port_t j_port;
	mach_port_t exit_status_dest;
	mach_port_t exit_status_port;
	mach_port_t spawn_reply_port;
	uid_t mach_uid;
	jobmgr_t mgr; // job manager that owns this job
	size_t argc; // number of entries in argv
	char **argv; // program arguments
	char *prog; // man launchd.plist --> Program
	char *rootdir;
	char *workingdir;
	char *username;
	char *groupname;
	char *stdinpath;
	char *stdoutpath;
	char *stderrpath;
	char *alt_exc_handler;
	char *cfbundleidentifier;
	unsigned int nruns; // number of times the job has run — confirm at use site
	uint64_t trt;
#if HAVE_SANDBOX
	char *seatbelt_profile;
	uint64_t seatbelt_flags;
	char *container_identifier;
#endif
#if HAVE_QUARANTINE
	void *quarantine_data;
	size_t quarantine_data_sz;
#endif
	pid_t p; // pid of the running process — presumably 0 when not running; confirm
	uint64_t uniqueid;
	int last_exit_status; // status from the last exit — presumably wait(2) encoding; confirm in job_reap()
	int stdin_fd;
	int fork_fd;
	int nice; // man launchd.plist --> Nice (value; setnice flag below gates it)
	uint32_t pstype;
	uint32_t psproctype;
	int32_t jetsam_priority;
	int32_t jetsam_memlimit;
	int32_t main_thread_priority;
	uint32_t timeout;
	uint32_t exit_timeout; // seconds before SIGKILL after SIGTERM (cf. LAUNCHD_DEFAULT_EXIT_TIMEOUT)
	uint64_t sent_signal_time;
	uint64_t start_time;
	uint32_t min_run_time; // minimum run time in seconds (cf. LAUNCHD_MIN_JOB_RUN_TIME)
	bool unthrottle;
	uint32_t start_interval; // man launchd.plist --> StartInterval
	uint32_t peruser_suspend_count;
	uuid_t instance_id;
	mode_t mask; // man launchd.plist --> Umask (applied when setmask is set)
	mach_port_t asport;
	au_asid_t asid;
	uuid_t expected_audit_uuid;
	bool
		// man launchd.plist --> Debug
		debug:1,
		// man launchd.plist --> KeepAlive == false
		ondemand:1,
		// man launchd.plist --> SessionCreate
		session_create:1,
		// man launchd.plist --> LowPriorityIO
		low_pri_io:1,
		// man launchd.plist --> InitGroups
		no_init_groups:1,
		/* A legacy mach_init concept to make bootstrap_create_server/service()
		 * work
		 */
		priv_port_has_senders:1,
		// A hack during job importing
		importing_global_env:1,
		// A hack during job importing
		importing_hard_limits:1,
		// man launchd.plist --> Umask
		setmask:1,
		// A process that launchd knows about but doesn't manage.
		anonymous:1,
		// A legacy mach_init concept to detect sick jobs
		checkedin:1,
		// A job created via bootstrap_create_server()
		legacy_mach_job:1,
		// A job created via spawn_via_launchd()
		legacy_LS_job:1,
		// A legacy job that wants inetd compatible semantics
		inetcompat:1,
		// A twist on inetd compatibility
		inetcompat_wait:1,
		/* An event fired and the job should start, but not necessarily right
		 * away.
		 */
		start_pending:1,
		// man launchd.plist --> EnableGlobbing
		globargv:1,
		// man launchd.plist --> WaitForDebugger
		wait4debugger:1,
		// One-shot WaitForDebugger.
		wait4debugger_oneshot:1,
		// MachExceptionHandler == true
		internal_exc_handler:1,
		// A hack to support an option of spawn_via_launchd()
		stall_before_exec:1,
		/* man launchd.plist --> LaunchOnlyOnce.
		 *
		 * Note: <rdar://problem/5465184> Rename this to "HopefullyNeverExits".
		 */
		only_once:1,
		/* Make job_ignore() / job_watch() work. If these calls were balanced,
		 * then this wouldn't be necessary.
		 */
		currently_ignored:1,
		/* A job that forced all other jobs to be temporarily launch-on-
		 * demand
		 */
		forced_peers_to_demand_mode:1,
		// man launchd.plist --> Nice
		setnice:1,
		/* A job was asked to be unloaded/removed while running, we'll remove it
		 * after it exits.
		 */
		removal_pending:1,
		// job_kill() was called.
		sent_sigkill:1,
		// Enter the kernel debugger before killing a job.
		debug_before_kill:1,
		// A hack that launchd+launchctl use during jobmgr_t creation.
		weird_bootstrap:1,
		// man launchd.plist --> StartOnMount
		start_on_mount:1,
		// This job is a per-user launchd managed by the PID 1 launchd.
		per_user:1,
		// A job thoroughly confused launchd. We need to unload it ASAP.
		unload_at_mig_return:1,
		// man launchd.plist --> AbandonProcessGroup
		abandon_pg:1,
		/* During shutdown, do not send SIGTERM to stray processes in the
		 * process group of this job.
		 */
		ignore_pg_at_shutdown:1,
		/* Don't let this job create new 'job_t' objects in launchd. Has been
		 * seriously overloaded for the purposes of sandboxing.
		 */
		deny_job_creation:1,
		// man launchd.plist --> EnableTransactions
		enable_transactions:1,
		// The job was sent SIGKILL because it was clean.
		clean_kill:1,
		// The job has an OtherJobEnabled KeepAlive criterion.
		nosy:1,
		// The job exited due to a crash.
		crashed:1,
		// We've received NOTE_EXIT for the job and reaped it.
		reaped:1,
		// job_stop() was called.
		stopped:1,
		/* The job is to be kept alive continuously, but it must first get an
		 * initial kick off.
		 */
		needs_kickoff:1,
		// The job is a bootstrapper.
		is_bootstrapper:1,
		// The job owns the console.
		has_console:1,
		/* The job runs as a non-root user on embedded but has select privileges
		 * of the root user. This is SpringBoard.
		 */
		embedded_god:1,
		// The job is responsible for drawing the home screen on embedded.
		embedded_home:1,
		// We got NOTE_EXEC for the job.
		did_exec:1,
		// The job is an XPC service, and XPC proxy successfully exec(3)ed.
		xpcproxy_did_exec:1,
		// The (anonymous) job called vprocmgr_switch_to_session().
		holds_ref:1,
		// The job has Jetsam limits in place.
		jetsam_properties:1,
		// The job's Jetsam memory limits should only be applied in the background
		jetsam_memory_limit_background:1,
		/* This job was created as the result of a look up of a service provided
		 * by a MultipleInstance job.
		 */
		dedicated_instance:1,
		// The job supports creating additional instances of itself.
		multiple_instances:1,
		/* The sub-job was already removed from the parent's list of
		 * sub-jobs.
		 */
		former_subjob:1,
		/* The job is responsible for monitoring external events for this
		 * launchd.
		 */
		event_monitor:1,
		// The event monitor job has retrieved the initial list of events.
		event_monitor_ready2signal:1,
		// A lame hack.
		removing:1,
		// Disable ASLR when launching this job.
		disable_aslr:1,
		// The job is an XPC Service.
		xpc_service:1,
		// The job is the Performance team's shutdown monitor.
		shutdown_monitor:1,
		// We should open a transaction for the job when shutdown begins.
		dirty_at_shutdown:1,
		/* The job was sent SIGKILL but did not exit in a timely fashion,
		 * indicating a kernel bug.
		 */
		workaround9359725:1,
		// The job is the XPC domain bootstrapper.
		xpc_bootstrapper:1,
		// The job is an app (on either iOS or OS X) and has different resource
		// limitations.
		app:1,
		// FairPlay decryption failed on the job. This should only ever happen
		// to apps.
		fpfail:1,
		// The job failed to exec(3) for reasons that may be transient, so we're
		// waiting for UserEventAgent to tell us when it's okay to try spawning
		// again (i.e. when the executable path appears, when the UID appears,
		// etc.).
		waiting4ok:1,
		// The job exited due to memory pressure.
		jettisoned:1,
		// The job supports idle-exit.
		idle_exit:1,
		// The job was implicitly reaped by the kernel.
		implicit_reap:1,
		system_app :1,
		joins_gui_session :1,
		low_priority_background_io :1,
		legacy_timers :1;

	const char label[0]; // job label, allocated inline past the struct
};
  746  
  747  static size_t hash_label(const char *label) __attribute__((pure));
  748  static size_t hash_ms(const char *msstr) __attribute__((pure));
  749  static SLIST_HEAD(, job_s) s_curious_jobs;
  750  static LIST_HEAD(, job_s) managed_actives[ACTIVE_JOB_HASH_SIZE];
  751  
  752  #define job_assumes(j, e) os_assumes_ctx(job_log_bug, j, (e))
  753  #define job_assumes_zero(j, e) os_assumes_zero_ctx(job_log_bug, j, (e))
  754  #define job_assumes_zero_p(j, e) posix_assumes_zero_ctx(job_log_bug, j, (e))
  755  
  756  static void job_import_keys(launch_data_t obj, const char *key, void *context);
  757  static void job_import_bool(job_t j, const char *key, bool value);
  758  static void job_import_string(job_t j, const char *key, const char *value);
  759  static void job_import_integer(job_t j, const char *key, long long value);
  760  static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
  761  static void job_import_array(job_t j, const char *key, launch_data_t value);
  762  static void job_import_opaque(job_t j, const char *key, launch_data_t value);
  763  static bool job_set_global_on_demand(job_t j, bool val);
  764  static const char *job_active(job_t j);
  765  static void job_watch(job_t j);
  766  static void job_ignore(job_t j);
  767  static void job_reap(job_t j);
  768  static bool job_useless(job_t j);
  769  static bool job_keepalive(job_t j);
  770  static void job_dispatch_curious_jobs(job_t j);
  771  static void job_start(job_t j);
  772  static void job_start_child(job_t j) __attribute__((noreturn));
  773  static void job_setup_attributes(job_t j);
  774  static bool job_setup_machport(job_t j);
  775  static kern_return_t job_setup_exit_port(job_t j);
  776  static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
  777  static void job_postfork_become_user(job_t j);
  778  static void job_postfork_test_user(job_t j);
  779  static void job_log_pids_with_weird_uids(job_t j);
  780  static void job_setup_exception_port(job_t j, task_t target_task);
  781  static void job_callback(void *obj, struct kevent *kev);
  782  static void job_callback_proc(job_t j, struct kevent *kev);
  783  static void job_callback_timer(job_t j, void *ident);
  784  static void job_callback_read(job_t j, int ident);
  785  static void job_log_stray_pg(job_t j);
  786  static void job_log_children_without_exec(job_t j);
  787  static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
  788  static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
  789  static job_t job_new_alias(jobmgr_t jm, job_t src);
  790  static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
  791  static job_t job_new_subjob(job_t j, uuid_t identifier);
  792  static void job_kill(job_t j);
  793  static void job_uncork_fork(job_t j);
  794  static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
  795  static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
  796  static bool job_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);
  797  static void job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status);
  798  #if HAVE_SYSTEMSTATS
  799  static void job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status);
  800  #endif
  801  static void job_set_exception_port(job_t j, mach_port_t port);
  802  static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
  803  static void job_open_shutdown_transaction(job_t ji);
  804  static void job_close_shutdown_transaction(job_t ji);
  805  static launch_data_t job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport);
  806  static void job_setup_per_user_directory(job_t j, uid_t uid, const char *path);
  807  static void job_setup_per_user_directories(job_t j, uid_t uid, const char *label);
  808  static void job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data);
  809  static void job_update_jetsam_memory_limit(job_t j, int32_t limit);
  810  
  811  #if TARGET_OS_EMBEDDED
  812  static bool job_import_defaults(launch_data_t pload);
  813  #endif
  814  
/* Translation table from the XPC jetsam band constants used by clients to
 * the kernel memorystatus (jetsam) priority values. Presumably consumed by
 * job_update_jetsam_properties() (declared above) — confirm at its definition.
 * Note both AUDIO and ACCESSORY bands map to the same kernel priority.
 */
static struct priority_properties_t {
	long long band;
	int priority;
} _launchd_priority_map[] = {
	{ XPC_JETSAM_BAND_SUSPENDED, JETSAM_PRIORITY_IDLE },
	{ XPC_JETSAM_BAND_BACKGROUND_OPPORTUNISTIC, JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC },
	{ XPC_JETSAM_BAND_BACKGROUND, JETSAM_PRIORITY_BACKGROUND },
	{ XPC_JETSAM_BAND_MAIL, JETSAM_PRIORITY_MAIL },
	{ XPC_JETSAM_BAND_PHONE, JETSAM_PRIORITY_PHONE },
	{ XPC_JETSAM_BAND_UI_SUPPORT, JETSAM_PRIORITY_UI_SUPPORT },
	{ XPC_JETSAM_BAND_FOREGROUND_SUPPORT, JETSAM_PRIORITY_FOREGROUND_SUPPORT },
	{ XPC_JETSAM_BAND_FOREGROUND, JETSAM_PRIORITY_FOREGROUND },
	{ XPC_JETSAM_BAND_AUDIO, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
	{ XPC_JETSAM_BAND_ACCESSORY, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
	{ XPC_JETSAM_BAND_CRITICAL, JETSAM_PRIORITY_CRITICAL },
	{ XPC_JETSAM_BAND_TELEPHONY, JETSAM_PRIORITY_TELEPHONY },
};
  832  
/* Translation table from LAUNCH_JOBKEY_RESOURCELIMIT_* plist keys to the
 * corresponding setrlimit(2)/getrlimit(2) resource identifiers.
 */
static const struct {
	const char *key;
	int val;
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};
  847  
  848  static time_t cronemu(int mon, int mday, int hour, int min);
  849  static time_t cronemu_wday(int wday, int hour, int min);
  850  static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
  851  static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
  852  static bool cronemu_hour(struct tm *wtm, int hour, int min);
  853  static bool cronemu_min(struct tm *wtm, int min);
  854  
  855  // miscellaneous file local functions
  856  static size_t get_kern_max_proc(void);
  857  static char **mach_cmd2argv(const char *string);
  858  static size_t our_strhash(const char *s) __attribute__((pure));
  859  
  860  void eliminate_double_reboot(void);
  861  
  862  #pragma mark XPC Domain Forward Declarations
  863  static job_t _xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
  864  static int _xpc_domain_import_services(job_t j, launch_data_t services);
  865  
  866  #pragma mark XPC Event Forward Declarations
  867  static int xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms);
  868  static int xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply);
  869  static int xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply);
  870  static int xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply);
  871  static int xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
  872  static int xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply);
  873  static int xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
  874  static int xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply);
  875  
  876  #pragma mark XPC Process Forward Declarations
  877  static int xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply);
  878  static int xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply);
  879  
// file local globals
static job_t _launchd_embedded_god = NULL;	// job whose user may manipulate other jobs under hand-of-god mode (see job_stop/job_remove)
static job_t _launchd_embedded_home = NULL;
static size_t total_children;	// count of managed (non-anonymous) children; logged by jobmgr_still_alive_with_check()
static size_t total_anon_children;	// count of anonymous children; logged alongside total_children
static mach_port_t the_exception_server;
static job_t workaround_5477111;	// NOTE(review): purpose not visible in this file — see rdar://problem/5477111
static LIST_HEAD(, job_s) s_needing_sessions;	// jobs linked via needing_session_sle (unlinked in job_remove)
static LIST_HEAD(, eventsystem) _s_event_systems;
static struct eventsystem *_launchd_support_system;
static job_t _launchd_event_monitor;	// cleared in job_remove() when the event-monitor job is removed
static job_t _launchd_xpc_bootstrapper;	// cleared in job_remove() when the XPC bootstrapper job is removed
static job_t _launchd_shutdown_monitor;	// dispatched by jobmgr_shutdown() during PID-1 shutdown

#if TARGET_OS_EMBEDDED
static xpc_object_t _launchd_defaults_cache;

mach_port_t launchd_audit_port = MACH_PORT_DEAD;
pid_t launchd_audit_session = 0;
#else
mach_port_t launchd_audit_port = MACH_PORT_NULL;
au_asid_t launchd_audit_session = AU_DEFAUDITSID;
#endif

static int s_no_hang_fd = -1;

// process wide globals
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool launchd_shutdown_debugging = false;
bool launchd_verbose_boot = false;
bool launchd_embedded_handofgod = false;
bool launchd_runtime_busy_time = false;
  913  
  914  void
  915  job_ignore(job_t j)
  916  {
  917  	struct socketgroup *sg;
  918  	struct machservice *ms;
  919  
  920  	if (j->currently_ignored) {
  921  		return;
  922  	}
  923  
  924  	job_log(j, LOG_DEBUG, "Ignoring...");
  925  
  926  	j->currently_ignored = true;
  927  
  928  	SLIST_FOREACH(sg, &j->sockets, sle) {
  929  		socketgroup_ignore(j, sg);
  930  	}
  931  
  932  	SLIST_FOREACH(ms, &j->machservices, sle) {
  933  		machservice_ignore(j, ms);
  934  	}
  935  }
  936  
  937  void
  938  job_watch(job_t j)
  939  {
  940  	struct socketgroup *sg;
  941  	struct machservice *ms;
  942  
  943  	if (!j->currently_ignored) {
  944  		return;
  945  	}
  946  
  947  	job_log(j, LOG_DEBUG, "Watching...");
  948  
  949  	j->currently_ignored = false;
  950  
  951  	SLIST_FOREACH(sg, &j->sockets, sle) {
  952  		socketgroup_watch(j, sg);
  953  	}
  954  
  955  	SLIST_FOREACH(ms, &j->machservices, sle) {
  956  		machservice_watch(j, ms);
  957  	}
  958  }
  959  
  960  void
  961  job_stop(job_t j)
  962  {
  963  	int sig;
  964  
  965  	if (unlikely(!j->p || j->stopped || j->anonymous)) {
  966  		return;
  967  	}
  968  
  969  #if TARGET_OS_EMBEDDED
  970  	if (launchd_embedded_handofgod && _launchd_embedded_god) {
  971  		if (!_launchd_embedded_god->username || !j->username) {
  972  			errno = EPERM;
  973  			return;
  974  		}
  975  
  976  		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
  977  			errno = EPERM;
  978  			return;
  979  		}
  980  	} else if (launchd_embedded_handofgod) {
  981  		errno = EINVAL;
  982  		return;
  983  	}
  984  #endif
  985  
  986  	j->sent_signal_time = runtime_get_opaque_time();
  987  
  988  	job_log(j, LOG_DEBUG | LOG_CONSOLE, "Stopping job...");
  989  
  990  	int error = -1;
  991  	error = proc_terminate(j->p, &sig);
  992  	if (error) {
  993  		job_log(j, LOG_ERR | LOG_CONSOLE, "Could not terminate job: %d: %s", error, strerror(error));
  994  		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Using fallback option to terminate job...");
  995  		error = kill2(j->p, SIGTERM);
  996  		if (error) {
  997  			job_log(j, LOG_ERR, "Could not signal job: %d: %s", error, strerror(error));
  998  		} else {
  999  			sig = SIGTERM;
 1000  		}
 1001  	}
 1002  
 1003  	if (!error) {
 1004  		switch (sig) {
 1005  		case SIGKILL:
 1006  			j->sent_sigkill = true;
 1007  			j->clean_kill = true;
 1008  
 1009  			/* We cannot effectively simulate an exit for jobs during the course
 1010  			 * of a normal run. Even if we pretend that the job exited, we will
 1011  			 * still not have gotten the receive rights associated with the
 1012  			 * job's MachServices back, so we cannot safely respawn it.
 1013  			 */
 1014  			if (j->mgr->shutting_down) {
 1015  				error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j);
 1016  				(void)job_assumes_zero_p(j, error);
 1017  			}
 1018  
 1019  			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sent job SIGKILL.");
 1020  			break;
 1021  		case SIGTERM:
 1022  			if (j->exit_timeout) {
 1023  				error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j);
 1024  				(void)job_assumes_zero_p(j, error);
 1025  			} else {
 1026  				job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
 1027  			}
 1028  			job_log(j, LOG_DEBUG, "Sent job SIGTERM.");
 1029  			break;
 1030  		default:
 1031  			job_log(j, LOG_ERR | LOG_CONSOLE, "Job was sent unexpected signal: %d: %s", sig, strsignal(sig));
 1032  			break;
 1033  		}
 1034  	}
 1035  
 1036  	j->stopped = true;
 1037  }
 1038  
/* Serialize a job's configuration and state into a launch_data dictionary
 * (the wire format consumed by legacy IPC clients such as launchctl).
 * Returns NULL only when the top-level dictionary cannot be allocated;
 * every individual key is inserted on a best-effort basis.
 */
launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}

	// A FairPlay failure is reported as a distinguished exit status.
	long long status = j->last_exit_status;
	if (j->fpfail) {
		status = LAUNCH_EXITSTATUS_FAIRPLAY_FAIL;
	}
	if ((tmp = launch_data_new_integer(status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}

	// The PID key is only present while the job has a running process.
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->enable_transactions && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	// Each socket group is exported as an array of fds keyed by group name.
	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		unsigned int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	/* Mach services are exported as placeholder MACH_PORT_NULL machports.
	 * Per-PID services go into a separate dictionary (tmp3) that is
	 * allocated lazily the first time one is encountered.
	 */
	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
 1158  
 1159  static void
 1160  jobmgr_log_active_jobs(jobmgr_t jm)
 1161  {
 1162  	const char *why_active;
 1163  	jobmgr_t jmi;
 1164  	job_t ji;
 1165  
 1166  	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
 1167  		jobmgr_log_active_jobs(jmi);
 1168  	}
 1169  
 1170  	int level = LOG_DEBUG;
 1171  	if (pid1_magic) {
 1172  		level |= LOG_CONSOLE;
 1173  	}
 1174  
 1175  	LIST_FOREACH(ji, &jm->jobs, sle) {
 1176  		if ((why_active = job_active(ji))) {
 1177  			if (ji->p != 1) {
 1178  				job_log(ji, level, "%s", why_active);
 1179  
 1180  				uint32_t flags = 0;
 1181  				(void)proc_get_dirty(ji->p, &flags);
 1182  				if (!(flags & PROC_DIRTY_TRACKED)) {
 1183  					continue;
 1184  				}
 1185  
 1186  				char *dirty = "clean";
 1187  				if (flags & PROC_DIRTY_IS_DIRTY) {
 1188  					dirty = "dirty";
 1189  				}
 1190  
 1191  				char *idle_exit = "idle-exit unsupported";
 1192  				if (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT) {
 1193  					idle_exit = "idle-exit supported";
 1194  				}
 1195  
 1196  				job_log(ji, level, "Killability: %s/%s", dirty, idle_exit);
 1197  			}
 1198  		}
 1199  	}
 1200  }
 1201  
 1202  static void
 1203  jobmgr_still_alive_with_check(jobmgr_t jm)
 1204  {
 1205  	int level = LOG_DEBUG;
 1206  	if (pid1_magic) {
 1207  		level |= LOG_CONSOLE;
 1208  	}
 1209  
 1210  	jobmgr_log(jm, level, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
 1211  	jobmgr_log_active_jobs(jm);
 1212  	launchd_log_push();
 1213  }
 1214  
/* Begin shutting down a job manager: record and log the shutdown time, mark
 * the manager (and, recursively, all of its submanagers) as shutting down,
 * and for the root manager optionally spawn the shutdown monitor and arm a
 * 5-second repeating "still alive" timer. Returns the result of an initial
 * garbage-collection pass.
 */
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim the new line that asctime_r(3) puts there for some reason.
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	jm->shutting_down = true;

	// SAFE iteration: a submanager may unlink itself during its shutdown.
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (!jm->parentmgr) {
		if (pid1_magic) {
			// Spawn the shutdown monitor.
			if (_launchd_shutdown_monitor && !_launchd_shutdown_monitor->p) {
				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
				job_dispatch(_launchd_shutdown_monitor, true);
			}
		}

		// Repeating 5-second timer; drives the "still alive" progress logging.
		(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));
	}

	return jobmgr_do_garbage_collection(jm);
}
 1257  
/* Tear down a job manager: remove all submanagers and jobs, release its Mach
 * ports, wake any pending XPC reply, and free it. For the root PID-1 manager
 * this ends in reboot(2); for other parentless managers it ends in exit(3),
 * so this function may not return.
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!SLIST_EMPTY(&jm->submgrs)) {
		// Recursively remove submanagers; each removal unlinks it from the list.
		size_t cnt = 0;
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
			cnt++;
		}

		(void)jobmgr_assumes_zero(jm, cnt);
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && ji->p != 0) {
			// Forcibly forget the PID so job_remove() doesn't defer removal.
			job_log(ji, LOG_ERR, "Job is still active at job manager teardown.");
			ji->p = 0;
		}

		job_remove(ji);
	}

	struct waiting4attach *w4ai = NULL;
	while ((w4ai = LIST_FIRST(&jm->attaches))) {
		waiting4attach_delete(jm, w4ai);
	}

	// Release the manager's Mach port rights.
	if (jm->req_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_port));
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_close_recv(jm->jm_port));
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_bsport));
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_excport));
	}
	if (MACH_PORT_VALID(jm->req_asport)) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_asport));
	}
	if (jm->req_rport) {
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			(void)jobmgr_assumes_zero(jm, kr);
		}
	}
	if (jm->req_ctx) {
		(void)jobmgr_assumes_zero(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz));
	}

	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim the newline asctime_r(3) appends.
	date[24] = 0;

	// How long the shutdown took, measured from jobmgr_shutdown().
	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);

		// Hack for the guest user so that its stuff doesn't persist.
		//
		// <rdar://problem/14527875>
		if (strcmp(jm->name, VPROCMGR_SESSION_AQUA) == 0 && getuid() == 201) {
			raise(SIGTERM);
		}
	} else if (pid1_magic) {
		// Root manager as PID 1: this path ends in reboot(2).
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		launchd_closelog();
		(void)jobmgr_assumes_zero_p(jm, reboot(jm->reboot_flags));
	} else {
		// Parentless non-PID-1 manager: the whole process exits here.
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		launchd_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
 1360  
 1361  void
 1362  job_remove(job_t j)
 1363  {
 1364  	struct waiting_for_removal *w4r;
 1365  	struct calendarinterval *ci;
 1366  	struct semaphoreitem *si;
 1367  	struct socketgroup *sg;
 1368  	struct machservice *ms;
 1369  	struct limititem *li;
 1370  	struct envitem *ei;
 1371  
 1372  	if (j->alias) {
 1373  		/* HACK: Egregious code duplication. But as with machservice_delete(),
 1374  		 * job aliases can't (and shouldn't) have any complex behaviors 
 1375  		 * associated with them.
 1376  		 */
 1377  		while ((ms = SLIST_FIRST(&j->machservices))) {
 1378  			machservice_delete(j, ms, false);
 1379  		}
 1380  
 1381  		LIST_REMOVE(j, sle);
 1382  		LIST_REMOVE(j, label_hash_sle);
 1383  		free(j);
 1384  		return;
 1385  	}
 1386  
 1387  #if TARGET_OS_EMBEDDED
 1388  	if (launchd_embedded_handofgod && _launchd_embedded_god) {
 1389  		if (!(_launchd_embedded_god->username && j->username)) {
 1390  			errno = EPERM;
 1391  			return;
 1392  		}
 1393  
 1394  		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
 1395  			errno = EPERM;
 1396  			return;
 1397  		}
 1398  	} else if (launchd_embedded_handofgod) {
 1399  		errno = EINVAL;
 1400  		return;
 1401  	}
 1402  #endif
 1403  
 1404  	/* Do this BEFORE we check and see whether the job is still active. If we're
 1405  	 * a sub-job, we're being removed due to the parent job removing us.
 1406  	 * Therefore, the parent job will free itself after this call completes. So
 1407  	 * if we defer removing ourselves from the parent's list, we'll crash when
 1408  	 * we finally get around to it.
 1409  	 */
 1410  	if (j->dedicated_instance && !j->former_subjob) {
 1411  		LIST_REMOVE(j, subjob_sle);
 1412  		j->former_subjob = true;
 1413  	}
 1414  
 1415  	if (unlikely(j->p)) {
 1416  		if (j->anonymous) {
 1417  			job_reap(j);
 1418  		} else {
 1419  			job_log(j, LOG_DEBUG, "Removal pended until the job exits");
 1420  
 1421  			if (!j->removal_pending) {
 1422  				j->removal_pending = true;
 1423  				job_stop(j);
 1424  			}
 1425  
 1426  			return;
 1427  		}
 1428  	}
 1429  
 1430  	if (!j->removing) {
 1431  		j->removing = true;
 1432  		job_dispatch_curious_jobs(j);
 1433  	}
 1434  
 1435  	ipc_close_all_with_job(j);
 1436  
 1437  	if (j->forced_peers_to_demand_mode) {
 1438  		job_set_global_on_demand(j, false);
 1439  	}
 1440  
 1441  	if (job_assumes_zero(j, j->fork_fd)) {
 1442  		(void)posix_assumes_zero(runtime_close(j->fork_fd));
 1443  	}
 1444  
 1445  	if (j->stdin_fd) {
 1446  		(void)posix_assumes_zero(runtime_close(j->stdin_fd));
 1447  	}
 1448  
 1449  	if (j->j_port) {
 1450  		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
 1451  	}
 1452  
 1453  	while ((sg = SLIST_FIRST(&j->sockets))) {
 1454  		socketgroup_delete(j, sg);
 1455  	}
 1456  	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
 1457  		calendarinterval_delete(j, ci);
 1458  	}
 1459  	while ((ei = SLIST_FIRST(&j->env))) {
 1460  		envitem_delete(j, ei, false);
 1461  	}
 1462  	while ((ei = SLIST_FIRST(&j->global_env))) {
 1463  		envitem_delete(j, ei, true);
 1464  	}
 1465  	while ((li = SLIST_FIRST(&j->limits))) {
 1466  		limititem_delete(j, li);
 1467  	}
 1468  	while ((ms = SLIST_FIRST(&j->machservices))) {
 1469  		machservice_delete(j, ms, false);
 1470  	}
 1471  	while ((si = SLIST_FIRST(&j->semaphores))) {
 1472  		semaphoreitem_delete(j, si);
 1473  	}
 1474  	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
 1475  		waiting4removal_delete(j, w4r);
 1476  	}
 1477  
 1478  	struct externalevent *eei = NULL;
 1479  	while ((eei = LIST_FIRST(&j->events))) {
 1480  		externalevent_delete(eei);
 1481  	}
 1482  
 1483  	if (j->event_monitor) {
 1484  		_launchd_event_monitor = NULL;
 1485  	}
 1486  	if (j->xpc_bootstrapper) {
 1487  		_launchd_xpc_bootstrapper = NULL;
 1488  	}
 1489  
 1490  	if (j->prog) {
 1491  		free(j->prog);
 1492  	}
 1493  	if (j->argv) {
 1494  		free(j->argv);
 1495  	}
 1496  	if (j->rootdir) {
 1497  		free(j->rootdir);
 1498  	}
 1499  	if (j->workingdir) {
 1500  		free(j->workingdir);
 1501  	}
 1502  	if (j->username) {
 1503  		free(j->username);
 1504  	}
 1505  	if (j->groupname) {
 1506  		free(j->groupname);
 1507  	}
 1508  	if (j->stdinpath) {
 1509  		free(j->stdinpath);
 1510  	}
 1511  	if (j->stdoutpath) {
 1512  		free(j->stdoutpath);
 1513  	}
 1514  	if (j->stderrpath) {
 1515  		free(j->stderrpath);
 1516  	}
 1517  	if (j->alt_exc_handler) {
 1518  		free(j->alt_exc_handler);
 1519  	}
 1520  	if (j->cfbundleidentifier) {
 1521  		free(j->cfbundleidentifier);
 1522  	}
 1523  #if HAVE_SANDBOX
 1524  	if (j->seatbelt_profile) {
 1525  		free(j->seatbelt_profile);
 1526  	}
 1527  	if (j->container_identifier) {
 1528  		free(j->container_identifier);
 1529  	}
 1530  #endif
 1531  #if HAVE_QUARANTINE
 1532  	if (j->quarantine_data) {
 1533  		free(j->quarantine_data);
 1534  	}
 1535  #endif
 1536  	if (j->j_binpref) {
 1537  		free(j->j_binpref);
 1538  	}
 1539  	if (j->start_interval) {
 1540  		runtime_del_weak_ref();
 1541  		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
 1542  	}
 1543  	if (j->exit_timeout) {
 1544  		/* If this fails, it just means the timer's already fired, so no need to
 1545  		 * wrap it in an assumes() macro.
 1546  		 */
 1547  		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
 1548  	}
 1549  	if (j->asport != MACH_PORT_NULL) {
 1550  		(void)job_assumes_zero(j, launchd_mport_deallocate(j->asport));
 1551  	}
 1552  	if (!uuid_is_null(j->expected_audit_uuid)) {
 1553  		LIST_REMOVE(j, needing_session_sle);
 1554  	}
 1555  	if (j->embedded_god) {
 1556  		_launchd_embedded_god = NULL;
 1557  	}
 1558  	if (j->embedded_home) {
 1559  		_launchd_embedded_home = NULL;
 1560  	}
 1561  	if (j->shutdown_monitor) {
 1562  		_launchd_shutdown_monitor = NULL;
 1563  	}
 1564  
 1565  	(void)kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
 1566  
 1567  	LIST_REMOVE(j, sle);
 1568  	LIST_REMOVE(j, label_hash_sle);
 1569  
 1570  	job_t ji = NULL;
 1571  	job_t jit = NULL;
 1572  	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
 1573  		job_remove(ji);
 1574  	}
 1575  
 1576  	job_log(j, LOG_DEBUG, "Removed");
 1577  
 1578  	j->kqjob_callback = (kq_callback)0x8badf00d;
 1579  	free(j);
 1580  }
 1581  
 1582  void
 1583  socketgroup_setup(launch_data_t obj, const char *key, void *context)
 1584  {
 1585  	launch_data_t tmp_oai;
 1586  	job_t j = context;
 1587  	size_t i, fd_cnt = 1;
 1588  	int *fds;
 1589  
 1590  	if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
 1591  		fd_cnt = launch_data_array_get_count(obj);
 1592  	}
 1593  
 1594  	fds = alloca(fd_cnt * sizeof(int));
 1595  
 1596  	for (i = 0; i < fd_cnt; i++) {
 1597  		if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
 1598  			tmp_oai = launch_data_array_get_index(obj, i);
 1599  		} else {
 1600  			tmp_oai = obj;
 1601  		}
 1602  
 1603  		fds[i] = launch_data_get_fd(tmp_oai);
 1604  	}
 1605  
 1606  	socketgroup_new(j, key, fds, fd_cnt);
 1607  
 1608  	ipc_revoke_fds(obj);
 1609  }
 1610  
 1611  bool
 1612  job_set_global_on_demand(job_t j, bool val)
 1613  {
 1614  	if (j->forced_peers_to_demand_mode && val) {
 1615  		return false;
 1616  	} else if (!j->forced_peers_to_demand_mode && !val) {
 1617  		return false;
 1618  	}
 1619  
 1620  	if ((j->forced_peers_to_demand_mode = val)) {
 1621  		j->mgr->global_on_demand_cnt++;
 1622  	} else {
 1623  		j->mgr->global_on_demand_cnt--;
 1624  	}
 1625  
 1626  	if (j->mgr->global_on_demand_cnt == 0) {
 1627  		jobmgr_dispatch_all(j->mgr, false);
 1628  	}
 1629  
 1630  	return true;
 1631  }
 1632  
 1633  bool
 1634  job_setup_machport(job_t j)
 1635  {
 1636  	if (job_assumes_zero(j, launchd_mport_create_recv(&j->j_port)) != KERN_SUCCESS) {
 1637  		goto out_bad;
 1638  	}
 1639  
 1640  	if (job_assumes_zero(j, runtime_add_mport(j->j_port, job_server)) != KERN_SUCCESS) {
 1641  		goto out_bad2;
 1642  	}
 1643  
 1644  	if (job_assumes_zero(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS)) != KERN_SUCCESS) {
 1645  		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
 1646  		goto out_bad;
 1647  	}
 1648  
 1649  	return true;
 1650  out_bad2:
 1651  	(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
 1652  out_bad:
 1653  	return false;
 1654  }
 1655  
 1656  kern_return_t
 1657  job_setup_exit_port(job_t j)
 1658  {
 1659  	kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
 1660  	if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
 1661  		return MACH_PORT_NULL;
 1662  	}
 1663  
 1664  	struct mach_port_limits limits = {
 1665  		.mpl_qlimit = 1,
 1666  	};
 1667  	kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
 1668  	(void)job_assumes_zero(j, kr);
 1669  
 1670  	kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
 1671  	if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
 1672  		(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
 1673  		j->exit_status_port = MACH_PORT_NULL;
 1674  	}
 1675  
 1676  	return kr;
 1677  }
 1678  
 1679  job_t 
 1680  job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
 1681  {
 1682  	const char **argv = (const char **)mach_cmd2argv(cmd);
 1683  	job_t jr = NULL;
 1684  
 1685  	if (!argv) {
 1686  		goto out_bad;
 1687  	}
 1688  
 1689  	jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
 1690  	free(argv);
 1691  
 1692  	// Job creation can be denied during shutdown.
 1693  	if (unlikely(jr == NULL)) {
 1694  		goto out_bad;
 1695  	}
 1696  
 1697  	jr->mach_uid = uid;
 1698  	jr->ondemand = ond;
 1699  	jr->legacy_mach_job = true;
 1700  	jr->abandon_pg = true;
 1701  	jr->priv_port_has_senders = true; // the IPC that called us will make-send on this port
 1702  
 1703  	if (!job_setup_machport(jr)) {
 1704  		goto out_bad;
 1705  	}
 1706  
 1707  	job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");
 1708  
 1709  	return jr;
 1710  
 1711  out_bad:
 1712  	if (jr) {
 1713  		job_remove(jr);
 1714  	}
 1715  	return NULL;
 1716  }
 1717  
/* Create a job object to track a process that launchd did not spawn itself
 * (an "anonymous" job), identified by its PID. Returns the new job, or NULL
 * with errno set on failure. Can succeed even while the job manager is
 * shutting down (see the shutdown_state hack below).
 */
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	struct proc_bsdshortinfo proc;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (anonpid == 0) {
		errno = EINVAL;
		return NULL;
	}

	if (anonpid >= 100000) {
		/* The kernel currently defines PID_MAX to be 99999, but that define
		 * isn't exported.
		 */
		launchd_syslog(LOG_WARNING, "Did PID_MAX change? Got request from PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	/* libproc returns the number of bytes written into the buffer upon success,
	 * zero on failure. I'd much rather it return -1 on failure, like sysctl(3).
	 */
	if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}
		return NULL;
	}

	if (proc.pbsi_comm[0] == '\0') {
		launchd_syslog(LOG_WARNING, "Blank command for PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	// Zombies are noted but still tracked; kevent registration below may fail for them.
	if (unlikely(proc.pbsi_status == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
	}

	if (unlikely(proc.pbsi_flags & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
	}

	// Snapshot effective/real/saved credentials purely for the consistency check below.
	kp_euid = proc.pbsi_uid;
	kp_uid = proc.pbsi_ruid;
	kp_svuid = proc.pbsi_svuid;
	kp_egid = proc.pbsi_gid;
	kp_gid = proc.pbsi_rgid;
	kp_svgid = proc.pbsi_svgid;

	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
	}

	/* "Fix" for when the kernel turns the process tree into a weird, cyclic
	 * graph.
	 *
	 * See <rdar://problem/7264615> for the symptom and <rdar://problem/5020256>
	 * as to why this can happen.
	 */
	if ((pid_t)proc.pbsi_ppid == anonpid) {
		jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). Ignoring: %s", proc.pbsi_comm);
		errno = EINVAL;
		return NULL;
	}

	/* HACK: Normally, job_new() returns an error during shutdown, but anonymous
	 * jobs can pop up during shutdown and need to talk to us.
	 */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	// We only set requestor_pid for XPC domains.
	const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
	if ((jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL))) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		// Anonymous process reaping is messy.
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1)) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(jr, errno);
			}

			// Zombies interact weirdly with kevent(3).
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state)) {
			job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		/* NOTE(review): jp is always NULL here -- it is only assigned in the
		 * switch below -- so the ": <label>" suffix never prints. Presumably
		 * intentional given the recursion-avoidance ordering; TODO confirm.
		 */
		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
	} else {
		(void)os_assumes_zero(errno);
	}

	// Undo our hack from above.
	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	/* This is down here to prevent infinite recursion due to a process
	 * attaching to its parent through ptrace(3) -- causing a cycle in the
	 * process tree and thereby not making it a tree anymore. We need to make
	 * sure that the anonymous job has been added to the process list so that
	 * we'll find the tracing parent PID of the parent process, which is the
	 * child, when we go looking for it in jobmgr_find_by_pid().
	 *
	 * <rdar://problem/7264615>
	 */
	switch (proc.pbsi_ppid) {
	case 0:
		// The kernel.
		break;
	case 1:
		if (!pid1_magic) {
			break;
		}
		// Fall through.
	default:
		jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
		if (jobmgr_assumes(jm, jp != NULL)) {
			if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
				job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
			}
		}
		break;
	}

	return jr;
}
 1862  
 1863  job_t 
 1864  job_new_subjob(job_t j, uuid_t identifier)
 1865  {
 1866  	char label[0];
 1867  	uuid_string_t idstr;
 1868  	uuid_unparse(identifier, idstr);
 1869  	size_t label_sz = snprintf(label, 0, "%s.%s", j->label, idstr);
 1870  
 1871  	job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
 1872  	if (nj != NULL) {
 1873  		nj->kqjob_callback = job_callback;
 1874  		nj->original = j;
 1875  		nj->mgr = j->mgr;
 1876  		nj->min_run_time = j->min_run_time;
 1877  		nj->timeout = j->timeout;
 1878  		nj->exit_timeout = j->exit_timeout;
 1879  
 1880  		snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
 1881  
 1882  		// Set all our simple Booleans that are applicable.
 1883  		nj->debug = j->debug;
 1884  		nj->ondemand = j->ondemand;
 1885  		nj->checkedin = true;
 1886  		nj->low_pri_io = j->low_pri_io;
 1887  		nj->setmask = j->setmask;
 1888  		nj->wait4debugger = j->wait4debugger;
 1889  		nj->internal_exc_handler = j->internal_exc_handler;
 1890  		nj->setnice = j->setnice;
 1891  		nj->abandon_pg = j->abandon_pg;
 1892  		nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
 1893  		nj->deny_job_creation = j->deny_job_creation;
 1894  		nj->enable_transactions = j->enable_transactions;
 1895  		nj->needs_kickoff = j->needs_kickoff;
 1896  		nj->currently_ignored = true;
 1897  		nj->dedicated_instance = true;
 1898  		nj->xpc_service = j->xpc_service;
 1899  		nj->xpc_bootstrapper = j->xpc_bootstrapper;
 1900  		nj->jetsam_priority = j->jetsam_priority;
 1901  		nj->jetsam_memlimit = j->jetsam_memlimit;
 1902  		nj->psproctype = j->psproctype;
 1903  
 1904  		nj->mask = j->mask;
 1905  		uuid_copy(nj->instance_id, identifier);
 1906  
 1907  		// These jobs are purely on-demand Mach jobs.
 1908  		// {Hard | Soft}ResourceLimits are not supported.
 1909  		// JetsamPriority is not supported.
 1910  
 1911  		if (j->prog) {
 1912  			nj->prog = strdup(j->prog);
 1913  		}
 1914  		if (j->argv) {
 1915  			size_t sz = malloc_size(j->argv);
 1916  			nj->argv = (char **)malloc(sz);
 1917  			if (nj->argv != NULL) {
 1918  				// This is the start of our strings.
 1919  				char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
 1920  
 1921  				size_t i = 0;
 1922  				for (i = 0; i < j->argc; i++) {
 1923  					(void)strcpy(p, j->argv[i]);
 1924  					nj->argv[i] = p;
 1925  					p += (strlen(j->argv[i]) + 1);
 1926  				}
 1927  				nj->argv[i] = NULL;
 1928  			} else {
 1929  				(void)job_assumes_zero(nj, errno);
 1930  			}
 1931  
 1932  			nj->argc = j->argc;
 1933  		}
 1934  
 1935  		struct machservice *msi = NULL;
 1936  		SLIST_FOREACH(msi, &j->machservices, sle) {
 1937  			/* Only copy MachServices that were actually declared in the plist.
 1938  			 * So skip over per-PID ones and ones that were created via
 1939  			 * bootstrap_register().
 1940  			 */
 1941  			if (msi->upfront) {
 1942  				mach_port_t mp = MACH_PORT_NULL;
 1943  				struct machservice *msj = machservice_new(nj, msi->name, &mp, false);
 1944  				if (msj != NULL) {
 1945  					msj->reset = msi->reset;
 1946  					msj->delete_on_destruction = msi->delete_on_destruction;
 1947  					msj->drain_one_on_crash = msi->drain_one_on_crash;
 1948  					msj->drain_all_on_crash = msi->drain_all_on_crash;
 1949  
 1950  					kern_return_t kr = mach_port_set_attributes(mach_task_self(), msj->port, MACH_PORT_TEMPOWNER, NULL, 0);
 1951  					(void)job_assumes_zero(j, kr);
 1952  				} else {
 1953  					(void)job_assumes_zero(nj, errno);
 1954  				}
 1955  			}
 1956  		}
 1957  
 1958  		// We ignore global environment variables.
 1959  		struct envitem *ei = NULL;
 1960  		SLIST_FOREACH(ei, &j->env, sle) {
 1961  			if (envitem_new(nj, ei->key, ei->value, false)) {
 1962  				(void)job_assumes_zero(nj, errno);
 1963  			}
 1964  		}
 1965  		uuid_string_t val;
 1966  		uuid_unparse(identifier, val);
 1967  		if (envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false)) {
 1968  			(void)job_assumes_zero(nj, errno);
 1969  		}
 1970  
 1971  		if (j->rootdir) {
 1972  			nj->rootdir = strdup(j->rootdir);
 1973  		}
 1974  		if (j->workingdir) {
 1975  			nj->workingdir = strdup(j->workingdir);
 1976  		}
 1977  		if (j->username) {
 1978  			nj->username = strdup(j->username);
 1979  		}
 1980  		if (j->groupname) {
 1981  			nj->groupname = strdup(j->groupname);
 1982  		}
 1983  
 1984  		/* FIXME: We shouldn't redirect all the output from these jobs to the
 1985  		 * same file. We should uniquify the file names. But this hasn't shown
 1986  		 * to be a problem in practice.
 1987  		 */
 1988  		if (j->stdinpath) {
 1989  			nj->stdinpath = strdup(j->stdinpath);
 1990  		}
 1991  		if (j->stdoutpath) {
 1992  			nj->stdoutpath = strdup(j->stdinpath);
 1993  		}
 1994  		if (j->stderrpath) {
 1995  			nj->stderrpath = strdup(j->stderrpath);
 1996  		}
 1997  		if (j->alt_exc_handler) {
 1998  			nj->alt_exc_handler = strdup(j->alt_exc_handler);
 1999  		}
 2000  		if (j->cfbundleidentifier) {
 2001  			nj->cfbundleidentifier = strdup(j->cfbundleidentifier);
 2002  		}
 2003  #if HAVE_SANDBOX
 2004  		if (j->seatbelt_profile) {
 2005  			nj->seatbelt_profile = strdup(j->seatbelt_profile);
 2006  		}
 2007  		if (j->container_identifier) {
 2008  			nj->container_identifier = strdup(j->container_identifier);
 2009  		}
 2010  #endif
 2011  
 2012  #if HAVE_QUARANTINE
 2013  		if (j->quarantine_data) {
 2014  			nj->quarantine_data = strdup(j->quarantine_data);
 2015  		}
 2016  		nj->quarantine_data_sz = j->quarantine_data_sz;
 2017  #endif
 2018  		if (j->j_binpref) {
 2019  			size_t sz = malloc_size(j->j_binpref);
 2020  			nj->j_binpref = (cpu_type_t *)malloc(sz);
 2021  			if (nj->j_binpref) {
 2022  				memcpy(&nj->j_binpref, &j->j_binpref, sz);
 2023  			} else {
 2024  				(void)job_assumes_zero(nj, errno);
 2025  			}
 2026  		}
 2027  
 2028  		if (j->asport != MACH_PORT_NULL) {
 2029  			(void)job_assumes_zero(nj, launchd_mport_copy_send(j->asport));
 2030  			nj->asport = j->asport;
 2031  		}
 2032  
 2033  		LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
 2034  
 2035  		jobmgr_t where2put = root_jobmgr;
 2036  		if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
 2037  			where2put = j->mgr;
 2038  		}
 2039  		LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
 2040  		LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
 2041  	} else {
 2042  		(void)os_assumes_zero(errno);
 2043  	}
 2044  
 2045  	return nj;
 2046  }
 2047  
 2048  job_t 
 2049  job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
 2050  {
 2051  	const char *const *argv_tmp = argv;
 2052  	char tmp_path[PATH_MAX];
 2053  	char auto_label[1000];
 2054  	const char *bn = NULL;
 2055  	char *co;
 2056  	size_t minlabel_len;
 2057  	size_t i, cc = 0;
 2058  	job_t j;
 2059  
 2060  	__OS_COMPILETIME_ASSERT__(offsetof(struct job_s, kqjob_callback) == 0);
 2061  
 2062  	if (unlikely(jm->shutting_down)) {
 2063  		errno = EINVAL;
 2064  		return NULL;
 2065  	}
 2066  
 2067  	if (unlikely(prog == NULL && argv == NULL)) {
 2068  		errno = EINVAL;
 2069  		return NULL;
 2070  	}
 2071  
 2072  	/* I'd really like to redo this someday. Anonymous jobs carry all the
 2073  	 * baggage of managed jobs with them, even though most of it is unused.
 2074  	 * Maybe when we have Objective-C objects in libSystem, there can be a base
 2075  	 * job type that anonymous and managed jobs inherit from...
 2076  	 */
 2077  	char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
 2078  	if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
 2079  		if (prog) {
 2080  			bn = prog;
 2081  		} else {
 2082  			strlcpy(tmp_path, argv[0], sizeof(tmp_path));
 2083  			// prog for auto labels is kp.kp_kproc.p_comm.
 2084  			bn = basename(tmp_path);
 2085  		}
 2086  
 2087  		(void)snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
 2088  		label = auto_label;
 2089  		/* This is so we can do gross things later. See NOTE_EXEC for anonymous
 2090  		 * jobs.
 2091  		 */
 2092  		minlabel_len = strlen(label) + MAXCOMLEN;
 2093  	} else {
 2094  		if (label == AUTO_PICK_XPC_LABEL) {
 2095  			minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
 2096  		} else {
 2097  			minlabel_len = strlen(label);
 2098  		}
 2099  	}
 2100  
 2101  	j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);
 2102  
 2103  	if (!j) {
 2104  		(void)os_assumes_zero(errno);
 2105  		return NULL;
 2106  	}
 2107  
 2108  	if (unlikely(label == auto_label)) {
 2109  		(void)snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
 2110  	} else {
 2111  		(void)strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
 2112  	}
 2113  
 2114  	j->kqjob_callback = job_callback;
 2115  	j->mgr = jm;
 2116  	j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
 2117  	j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
 2118  	j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
 2119  	j->currently_ignored = true;
 2120  	j->ondemand = true;
 2121  	j->checkedin = true;
 2122  	j->jetsam_priority = DEFAULT_JETSAM_PRIORITY;
 2123  	j->jetsam_memlimit = -1;
 2124  	uuid_clear(j->expected_audit_uuid);
 2125  #if TARGET_OS_EMBEDDED
 2126  	/* Run embedded daemons as background by default. SpringBoard jobs are
 2127  	 * Interactive by default. Unfortunately, so many daemons have opted into
 2128  	 * this priority band that its usefulness is highly questionable.
 2129  	 * 
 2130  	 * See <rdar://problem/9539873>.
 2131  	 *
 2132  	 * Also ensure that daemons have a default memory highwatermark unless
 2133  	 * otherwise specified, as per <rdar://problem/10307814>.
 2134  	 */
 2135  	if (launchd_embedded_handofgod) {
 2136  		j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
 2137  		j->app = true;
 2138  	} else {
 2139  		j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
 2140  		j->jetsam_memlimit = DEFAULT_JETSAM_DAEMON_HIGHWATERMARK;
 2141  	}
 2142  #else
 2143  	/* Jobs on OS X that just come from disk are "standard" by default so that
 2144  	 * third-party daemons/agents don't encounter unexpected throttling.
 2145  	 */
 2146  	j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
 2147  #endif
 2148  
 2149  	if (prog) {
 2150  		j->prog = strdup(prog);
 2151  		if (!j->prog) {
 2152  			(void)os_assumes_zero(errno);
 2153  			goto out_bad;
 2154  		}
 2155  	}
 2156  
 2157  	if (likely(argv)) {
 2158  		while (*argv_tmp++) {
 2159  			j->argc++;
 2160  		}
 2161  
 2162  		for (i = 0; i < j->argc; i++) {
 2163  			cc += strlen(argv[i]) + 1;
 2164  		}
 2165  
 2166  		j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
 2167  		if (!j->argv) {
 2168  			(void)job_assumes_zero(j, errno);
 2169  			goto out_bad;
 2170  		}
 2171  
 2172  		co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));
 2173  
 2174  		for (i = 0; i < j->argc; i++) {
 2175  			j->argv[i] = co;
 2176  			(void)strcpy(co, argv[i]);
 2177  			co += strlen(argv[i]) + 1;
 2178  		}
 2179  		j->argv[i] = NULL;
 2180  	}
 2181  
 2182  	// Sssshhh... don't tell anyone.
 2183  	if (strcmp(j->label, "com.apple.WindowServer") == 0) {
 2184  		j->has_console = true;
 2185  	}
 2186  
 2187  	LIST_INSERT_HEAD(&jm->jobs, j, sle);
 2188  
 2189  	jobmgr_t where2put_label = root_jobmgr;
 2190  	if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
 2191  		where2put_label = j->mgr;
 2192  	}
 2193  	LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
 2194  	uuid_clear(j->expected_audit_uuid);
 2195  
 2196  	job_log(j, LOG_DEBUG, "Conceived");
 2197  
 2198  	return j;
 2199  
 2200  out_bad:
 2201  	if (j->prog) {
 2202  		free(j->prog);
 2203  	}
 2204  	free(j);
 2205  
 2206  	return NULL;
 2207  }
 2208  
 2209  job_t
 2210  job_new_alias(jobmgr_t jm, job_t src)
 2211  {
 2212  	if (job_find(jm, src->label)) {
 2213  		errno = EEXIST;
 2214  		return NULL;
 2215  	}
 2216  
 2217  	job_t j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
 2218  	if (!j) {
 2219  		(void)os_assumes_zero(errno);
 2220  		return NULL;
 2221  	}
 2222  
 2223  	(void)strcpy((char *)j->label, src->label);
 2224  	LIST_INSERT_HEAD(&jm->jobs, j, sle);
 2225  	LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
 2226  	/* Bad jump address. The kqueue callback for aliases should never be
 2227  	 * invoked.
 2228  	 */
 2229  	j->kqjob_callback = (kq_callback)0xfa1afe1;
 2230  	j->alias = src;
 2231  	j->mgr = jm;
 2232  
 2233  	struct machservice *msi = NULL;
 2234  	SLIST_FOREACH(msi, &src->machservices, sle) {
 2235  		if (!machservice_new_alias(j, msi)) {
 2236  			jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
 2237  			errno = EINVAL;
 2238  			job_remove(j);
 2239  			j = NULL;
 2240  			break;
 2241  		}
 2242  	}
 2243  
 2244  	if (j) {
 2245  		job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
 2246  	}
 2247  
 2248  	return j;
 2249  }
 2250  
 2251  job_t 
 2252  job_import(launch_data_t pload)
 2253  {
 2254  #if TARGET_OS_EMBEDDED
 2255  	/* If this is the special payload of default values, handle it here */
 2256  	if (unlikely(launch_data_dict_lookup(pload, LAUNCH_JOBKEY_DEFAULTS))) {
 2257  		job_import_defaults(pload);
 2258  		return NULL;
 2259  	}
 2260  #endif    
 2261      
 2262  	job_t j = jobmgr_import2(root_jobmgr, pload);
 2263  
 2264  	if (unlikely(j == NULL)) {
 2265  		return NULL;
 2266  	}
 2267  
 2268  	/* Since jobs are effectively stalled until they get security sessions
 2269  	 * assigned to them, we may wish to reconsider this behavior of calling the
 2270  	 * job "enabled" as far as other jobs with the OtherJobEnabled KeepAlive
 2271  	 * criterion set.
 2272  	 */
 2273  	job_dispatch_curious_jobs(j);
 2274  	return job_dispatch(j, false);
 2275  }
 2276  
 2277  #if TARGET_OS_EMBEDDED
 2278  
 2279  bool
 2280  job_import_defaults(launch_data_t pload)
 2281  {
 2282  	bool result = false;
 2283  	xpc_object_t xd = NULL, defaults;
 2284     
 2285  	if (_launchd_defaults_cache) {
 2286  		xpc_release(_launchd_defaults_cache);
 2287  		_launchd_defaults_cache = NULL;
 2288  	}
 2289  
 2290  	xd = ld2xpc(pload);
 2291  	if (!xd || xpc_get_type(xd) != XPC_TYPE_DICTIONARY) {
 2292  		goto out;
 2293  	}
 2294  
 2295  	defaults = xpc_dictionary_get_value(xd, LAUNCHD_JOB_DEFAULTS);
 2296  	if (!defaults || xpc_get_type(defaults) != XPC_TYPE_DICTIONARY) {
 2297  		goto out;
 2298  	}
 2299  
 2300  	_launchd_defaults_cache = xpc_copy(defaults);
 2301  	result = true;
 2302  out:
 2303  	if (xd) {
 2304  		xpc_release(xd);
 2305  	}
 2306  
 2307  	return result;
 2308  }
 2309  
/* Apply any cached per-job default keys to job j, matching on its label.
 * Labels of the form "Prefix:com.apple.foo[suffix]" are first reduced to
 * "com.apple.foo". Returns true if defaults were applied or the job was
 * exempted as a test job; false otherwise.
 */
bool
job_apply_defaults(job_t j) {
	const char *test_prefix = "com.apple.test.";
    
	char *sb_prefix_end, *sb_suffix_start;
	// VLA: always large enough, since the extracted name is a strict substring of j->label.
	char true_job_label[strlen(j->label)];
	const char *label;

	if (((sb_prefix_end = strchr(j->label, ':')) != NULL) &&
	    ((sb_suffix_start = strchr(sb_prefix_end + 1, '[')) != NULL)) {
 		/*
 		 * Workaround 'UIKitApplication:com.apple.foo[bar]' convention for the processes
 		 * we're interested in. To be removed when <rdar://problem/13066361> is addressed.
 		 */
		/* The size argument (distance from ':' to '[') makes snprintf copy
		 * exactly the characters between them and NUL-terminate.
		 */
		snprintf(true_job_label, sb_suffix_start - sb_prefix_end, "%s", sb_prefix_end + 1);
 		label = true_job_label;
	} else {
		/* Just test the standard label */
		label = j->label;
	}
    
	/* Test for cache presence and apply if found */
	if (_launchd_defaults_cache) { 
		xpc_object_t props = xpc_dictionary_get_value(_launchd_defaults_cache, label);
		if (props && xpc_get_type(props) == XPC_TYPE_DICTIONARY) {
			// Convert back to launch_data and run the values through the normal key importer.
			launch_data_t lv = xpc2ld(props);
			launch_data_dict_iterate(lv, job_import_keys, j);
			launch_data_free(lv);
			return true;
		}
	}
    
	/* Limit free? Disable the memory limit if this is a test job; see <rdar://problem/13180697> */
	if (!strncmp(label, test_prefix, strlen(test_prefix))) {
		j->jetsam_memlimit = -1;
		return true;
	}
    
	return false;
}
 2350  
 2351  #endif
 2352  
 2353  launch_data_t
 2354  job_import_bulk(launch_data_t pload)
 2355  {
 2356  	launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
 2357  	job_t *ja;
 2358  	size_t i, c = launch_data_array_get_count(pload);
 2359  
 2360  	ja = alloca(c * sizeof(job_t));
 2361  
 2362  	for (i = 0; i < c; i++) {
 2363  		if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
 2364  			errno = 0;
 2365  		}
 2366  		launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
 2367  	}
 2368  
 2369  	for (i = 0; i < c; i++) {
 2370  		if (likely(ja[i])) {
 2371  			job_dispatch_curious_jobs(ja[i]);
 2372  			job_dispatch(ja[i], false);
 2373  		}
 2374  	}
 2375  
 2376  	return resp;
 2377  }
 2378  
 2379  void
 2380  job_import_bool(job_t j, const char *key, bool value)
 2381  {
 2382  	bool found_key = false;
 2383  
 2384  	switch (key[0]) {
 2385  	case 'a':
 2386  	case 'A':
 2387  		if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
 2388  			j->abandon_pg = value;
 2389  			found_key = true;
 2390  		}
 2391  		break;
 2392  	case 'b':
 2393  	case 'B':
 2394  		if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
 2395  			j->dirty_at_shutdown = value;
 2396  			found_key = true;
 2397  		}
 2398  		break;
 2399  	case 'j':
 2400  	case 'J':
 2401  		if (strcasecmp(key, LAUNCH_JOBKEY_JOINGUISESSION) == 0) {
 2402  			j->joins_gui_session = value;
 2403  			found_key = true;
 2404  		}
 2405  		break;
 2406  	case 'k':
 2407  	case 'K':
 2408  		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
 2409  			j->ondemand = !value;
 2410  			found_key = true;
 2411  		}
 2412  		break;
 2413  	case 'o':
 2414  	case 'O':
 2415  		if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
 2416  			j->ondemand = value;
 2417  			found_key = true;
 2418  		}
 2419  		break;
 2420  	case 'd':
 2421  	case 'D':
 2422  		if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
 2423  			j->debug = value;
 2424  			found_key = true;
 2425  		} else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
 2426  			(void)job_assumes(j, !value);
 2427  			found_key = true;
 2428  		} else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
 2429  			j->disable_aslr = value;
 2430  			found_key = true;
 2431  		}
 2432  		break;
 2433  	case 'h':
 2434  	case 'H':
 2435  		if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
 2436  			job_log(j, LOG_PERF, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
 2437  			j->dirty_at_shutdown = value;
 2438  			found_key = true;
 2439  		}
 2440  		break;
 2441  	case 's':
 2442  	case 'S':
 2443  		if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
 2444  			j->session_create = value;
 2445  			found_key = true;
 2446  		} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
 2447  			j->start_on_mount = value;
 2448  			found_key = true;
 2449  		} else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
 2450  			// this only does something on Mac OS X 10.4 "Tiger"
 2451  			found_key = true;
 2452  		} else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
 2453  			if (_launchd_shutdown_monitor) {
 2454  				job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
 2455  			} else {
 2456  				j->shutdown_monitor = true;
 2457  				_launchd_shutdown_monitor = j;
 2458  			}
 2459  			found_key = true;
 2460  		}
 2461  		break;
 2462  	case 'l':
 2463  	case 'L':
 2464  		if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
 2465  			j->low_pri_io = value;
 2466  			found_key = true;
 2467  		} else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
 2468  			j->only_once = value;
 2469  			found_key = true;
 2470  		} else if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYBACKGROUNDIO) == 0) {
 2471  			j->low_priority_background_io = true;
 2472  			found_key = true;
 2473  		} else if (strcasecmp(key, LAUNCH_JOBKEY_LEGACYTIMERS) == 0) {
 2474  #if !TARGET_OS_EMBEDDED
 2475  			j->legacy_timers = value;
 2476  #else // !TARGET_OS_EMBEDDED
 2477  			job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
 2478  #endif // !TARGET_OS_EMBEDDED
 2479  			found_key = true;
 2480  		}
 2481  		break;
 2482  	case 'm':
 2483  	case 'M':
 2484  		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
 2485  			j->internal_exc_handler = value;
 2486  			found_key = true;
 2487  		} else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
 2488  			j->multiple_instances = value;
 2489  			found_key = true;
 2490  		}
 2491  		break;
 2492  	case 'i':
 2493  	case 'I':
 2494  		if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
 2495  			if (getuid() != 0) {
 2496  				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
 2497  				return;
 2498  			}
 2499  			j->no_init_groups = !value;
 2500  			found_key = true;
 2501  		} else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
 2502  			j->ignore_pg_at_shutdown = value;
 2503  			found_key = true;
 2504  		}
 2505  		break;
 2506  	case 'r':
 2507  	case 'R':
 2508  		if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
 2509  			if (value) {
 2510  				// We don't want value == false to change j->start_pending
 2511  				j->start_pending = true;
 2512  			}
 2513  			found_key = true;
 2514  		}
 2515  		break;
 2516  	case 'e':
 2517  	case 'E':
 2518  		if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
 2519  			j->globargv = value;
 2520  			found_key = true;
 2521  		} else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
 2522  			j->enable_transactions = value;
 2523  			found_key = true;
 2524  		} else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
 2525  			j->debug_before_kill = value;
 2526  			found_key = true;
 2527  		} else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
 2528  #if TARGET_OS_EMBEDDED
 2529  			if (!_launchd_embedded_god) {
 2530  				if ((j->embedded_god = value)) {
 2531  					_launchd_embedded_god = j;
 2532  				}
 2533  			} else {
 2534  				job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
 2535  			}
 2536  #else
 2537  			job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
 2538  #endif
 2539  			found_key = true;
 2540  		} else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDHOMESCREEN) == 0) {
 2541  #if TARGET_OS_EMBEDDED
 2542  			if (!_launchd_embedded_home) {
 2543  				if ((j->embedded_home = value)) {
 2544  					_launchd_embedded_home = j;
 2545  				}
 2546  			} else {
 2547  				job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
 2548  			}
 2549  #else
 2550  			job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
 2551  #endif
 2552  		} else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
 2553  			if (!_launchd_event_monitor) {
 2554  				j->event_monitor = value;
 2555  				if (value) {
 2556  					_launchd_event_monitor = j;
 2557  				}
 2558  			} else {
 2559  				job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility from: %s", _launchd_event_monitor->label);
 2560  			}
 2561  			found_key = true;
 2562  		}
 2563  		break;
 2564  	case 'w':
 2565  	case 'W':
 2566  		if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
 2567  			j->wait4debugger = value;
 2568  			found_key = true;
 2569  		}
 2570  		break;
 2571  	case 'x':
 2572  	case 'X':
 2573  		if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
 2574  			if (pid1_magic) {
 2575  				if (_launchd_xpc_bootstrapper) {
 2576  					job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _launchd_xpc_bootstrapper->label);
 2577  				} else {
 2578  					_launchd_xpc_bootstrapper = j;
 2579  					j->xpc_bootstrapper = value;
 2580  				}
 2581  			} else {
 2582  				job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
 2583  			}
 2584  		}
 2585  		found_key = true;
 2586  		break;
 2587  	default:
 2588  		break;
 2589  	}
 2590  
 2591  	if (unlikely(!found_key)) {
 2592  		job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
 2593  	}
 2594  }
 2595  
/* Import a single string-valued launchd plist key into the job.
 *
 * Most keys simply have their value strdup()'d into a field of the job
 * structure (selected via where2put). Special cases:
 *  - Program, Label, LimitLoadTo*/From*, and XPCDomain are consumed
 *    elsewhere and silently ignored here.
 *  - POSIXSpawnType/ProcessType translate the value into j->psproctype.
 *  - RootDirectory/UserName/GroupName are honored only when running as
 *    root; UserName "root" and GroupName "wheel" are no-ops.
 *  - StandardInPath additionally opens the file and registers a read
 *    kevent so the job can be launched on stdin activity.
 */
void
job_import_string(job_t j, const char *key, const char *value)
{
	char **where2put = NULL;

	// Dispatch on the first character to avoid strcasecmp()ing every key.
	switch (key[0]) {
	case 'c':
	case 'C':
		if (strcasecmp(key, LAUNCH_JOBKEY_CFBUNDLEIDENTIFIER) == 0) {
			where2put = &j->cfbundleidentifier;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			where2put = &j->alt_exc_handler;
		}
		break;
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
			// Handled during job creation; nothing to store here.
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0
				|| strcasecmp(key, LAUNCH_JOBKEY_PROCESSTYPE) == 0) {
			// Map the textual spawn type onto the posix_spawn proc type.
			if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_INTERACTIVE) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_ADAPTIVE) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_STANDARD) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_BACKGROUND) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_TAL;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_SYSTEMAPP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
				j->system_app = true;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_APP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
				j->app = true;
			} else {
				job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
			}
			return;
		}
		break;
	case 'l':
	case 'L':
		// All of these are consumed before/outside this importer.
		if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			return;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
			// chroot-style key: root only.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			where2put = &j->rootdir;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
			where2put = &j->workingdir;
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "root") == 0) {
				// Already root; nothing to change.
				return;
			}
			where2put = &j->username;
		}
		break;
	case 'g':
	case 'G':
		if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "wheel") == 0) {
				return;
			}
			where2put = &j->groupname;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
			where2put = &j->stdoutpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
			where2put = &j->stderrpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
			where2put = &j->stdinpath;
			/* O_NONBLOCK so opening a FIFO with no writer cannot hang
			 * launchd. _fd() presumably tags/tracks the descriptor —
			 * see its definition elsewhere in this file. */
			j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
			if (job_assumes_zero_p(j, j->stdin_fd) != -1) {
				// open() should not block, but regular IO by the job should
				(void)job_assumes_zero_p(j, fcntl(j->stdin_fd, F_SETFL, 0));
				// XXX -- EV_CLEAR should make named pipes happy?
				(void)job_assumes_zero_p(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j));
			} else {
				// Open failed; clear the fd so nothing tries to use it.
				j->stdin_fd = 0;
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
			where2put = &j->seatbelt_profile;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXCONTAINER) == 0) {
			where2put = &j->container_identifier;
#endif
		}
		break;
	case 'X':
	case 'x':
		if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
			return;
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
		break;
	}

	// Common path: duplicate the value into the selected job field.
	if (likely(where2put)) {
		if (!(*where2put = strdup(value))) {
			(void)job_assumes_zero(j, errno);
		}
	} else {
		// See rdar://problem/5496612. These two are okay.
		if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) == 0
			|| strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) == 0) {
			job_log(j, LOG_APPLEONLY, "This key is no longer relevant and should be removed: %s", key);
		} else {
			job_log(j, LOG_WARNING, "Unknown key: %s", key);
		}
	}
}
 2744  
 2745  void
 2746  job_import_integer(job_t j, const char *key, long long value)
 2747  {
 2748  	switch (key[0]) {
 2749  	case 'a':
 2750  	case 'A':
 2751  #if TARGET_OS_EMBEDDED
 2752  		if (strcasecmp(key, LAUNCH_JOBKEY_ASID) == 0) {
 2753  			if (launchd_embedded_handofgod) {
 2754  				if (audit_session_port((au_asid_t)value, &j->asport) == -1 && errno != ENOSYS) {
 2755  					(void)job_assumes_zero(j, errno);
 2756  				}
 2757  			}
 2758  		}
 2759  #endif
 2760  	case 'e':
 2761  	case 'E':
 2762  		if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
 2763  			if (unlikely(value < 0)) {
 2764  				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
 2765  			} else if (unlikely(value > UINT32_MAX)) {
 2766  				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
 2767  			} else {
 2768  				j->exit_timeout = (typeof(j->exit_timeout)) value;
 2769  			}
 2770  		} else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
 2771  			j->main_thread_priority = value;
 2772  		}
 2773  		break;
 2774  	case 'j':
 2775  	case 'J':
 2776  		if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
 2777  			job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
 2778  
 2779  			launch_data_t pri = launch_data_new_integer(value);
 2780  			if (job_assumes(j, pri != NULL)) {
 2781  				jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
 2782  				launch_data_free(pri);
 2783  			}
 2784  		}
 2785  	case 'n':
 2786  	case 'N':
 2787  		if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
 2788  			if (unlikely(value < PRIO_MIN)) {
 2789  				job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
 2790  			} else if (unlikely(value > PRIO_MAX)) {
 2791  				job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
 2792  			} else {
 2793  				j->nice = (typeof(j->nice)) value;
 2794  				j->setnice = true;
 2795  			}
 2796  		}
 2797  		break;
 2798  	case 't':
 2799  	case 'T':
 2800  		if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
 2801  			if (unlikely(value < 0)) {
 2802  				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
 2803  			} else if (unlikely(value > UINT32_MAX)) {
 2804  				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
 2805  			} else {
 2806  				j->timeout = (typeof(j->timeout)) value;
 2807  			}
 2808  		} else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
 2809  			if (value < 0) {
 2810  				job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
 2811  			} else if (value > UINT32_MAX) {
 2812  				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
 2813  			} else {
 2814  				j->min_run_time = (typeof(j->min_run_time)) value;
 2815  			}
 2816  		}
 2817  		break;
 2818  	case 'u':
 2819  	case 'U':
 2820  		if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
 2821  			j->mask = value;
 2822  			j->setmask = true;
 2823  		}
 2824  		break;
 2825  	case 's':
 2826  	case 'S':
 2827  		if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
 2828  			if (unlikely(value <= 0)) {
 2829  				job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
 2830  			} else if (unlikely(value > UINT32_MAX)) {
 2831  				job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
 2832  			} else {
 2833  				runtime_add_weak_ref();
 2834  				j->start_interval = (typeof(j->start_interval)) value;
 2835  
 2836  				(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
 2837  			}
 2838  #if HAVE_SANDBOX
 2839  		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
 2840  			j->seatbelt_flags = value;
 2841  #endif
 2842  		}
 2843  
 2844  		break;
 2845  	default:
 2846  		job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
 2847  		break;
 2848  	}
 2849  }
 2850  
 2851  void
 2852  job_import_opaque(job_t j __attribute__((unused)), const char *key, launch_data_t value __attribute__((unused)))
 2853  {
 2854  	switch (key[0]) {
 2855  	case 'q':
 2856  	case 'Q':
 2857  #if HAVE_QUARANTINE
 2858  		if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
 2859  			size_t tmpsz = launch_data_get_opaque_size(value);
 2860  
 2861  			if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
 2862  				memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
 2863  				j->quarantine_data_sz = tmpsz;
 2864  			}
 2865  		}
 2866  #endif
 2867  	case 's':
 2868  	case 'S':
 2869  		if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
 2870  			size_t tmpsz = launch_data_get_opaque_size(value);
 2871  			if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
 2872  				memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
 2873  			}
 2874  		}
 2875  		break;
 2876  	default:
 2877  		break;
 2878  	}
 2879  }
 2880  
 2881  static void
 2882  policy_setup(launch_data_t obj, const char *key, void *context)
 2883  {
 2884  	job_t j = context;
 2885  	bool found_key = false;
 2886  
 2887  	switch (key[0]) {
 2888  	case 'd':
 2889  	case 'D':
 2890  		if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
 2891  			j->deny_job_creation = launch_data_get_bool(obj);
 2892  			found_key = true;
 2893  		}
 2894  		break;
 2895  	default:
 2896  		break;
 2897  	}
 2898  
 2899  	if (unlikely(!found_key)) {
 2900  		job_log(j, LOG_WARNING, "Unknown policy: %s", key);
 2901  	}
 2902  }
 2903  
 2904  void
 2905  job_import_dictionary(job_t j, const char *key, launch_data_t value)
 2906  {
 2907  	launch_data_t tmp;
 2908  
 2909  	switch (key[0]) {
 2910  	case 'p':
 2911  	case 'P':
 2912  		if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
 2913  			launch_data_dict_iterate(value, policy_setup, j);
 2914  		}
 2915  		break;
 2916  	case 'k':
 2917  	case 'K':
 2918  		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
 2919  			launch_data_dict_iterate(value, semaphoreitem_setup, j);
 2920  		}
 2921  		break;
 2922  	case 'i':
 2923  	case 'I':
 2924  		if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
 2925  			j->inetcompat = true;
 2926  			j->abandon_pg = true;
 2927  			if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
 2928  				j->inetcompat_wait = launch_data_get_bool(tmp);
 2929  			}
 2930  		}
 2931  		break;
 2932  	case 'j':
 2933  	case 'J':
 2934  		if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
 2935  			launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
 2936  		}
 2937  	case 'e':
 2938  	case 'E':
 2939  		if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
 2940  			launch_data_dict_iterate(value, envitem_setup, j);
 2941  		}		
 2942  		break;
 2943  	case 'u':
 2944  	case 'U':
 2945  		if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
 2946  			j->importing_global_env = true;
 2947  			launch_data_dict_iterate(value, envitem_setup, j);
 2948  			j->importing_global_env = false;
 2949  		}
 2950  		break;
 2951  	case 's':
 2952  	case 'S':
 2953  		if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
 2954  			launch_data_dict_iterate(value, socketgroup_setup, j);
 2955  		} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
 2956  			calendarinterval_new_from_obj(j, value);
 2957  		} else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
 2958  			launch_data_dict_iterate(value, limititem_setup, j);
 2959  #if HAVE_SANDBOX
 2960  		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
 2961  			launch_data_dict_iterate(value, seatbelt_setup_flags, j);
 2962  #endif
 2963  		}
 2964  		break;
 2965  	case 'h':
 2966  	case 'H':
 2967  		if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
 2968  			j->importing_hard_limits = true;
 2969  			launch_data_dict_iterate(value, limititem_setup, j);
 2970  			j->importing_hard_limits = false;
 2971  		}
 2972  		break;
 2973  	case 'm':
 2974  	case 'M':
 2975  		if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
 2976  			launch_data_dict_iterate(value, machservice_setup, j);
 2977  		}
 2978  		break;
 2979  	case 'l':
 2980  	case 'L':
 2981  		if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
 2982  			launch_data_dict_iterate(value, eventsystem_setup, j);
 2983  		} else {
 2984  			if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
 2985  				return;
 2986  			}
 2987  			if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
 2988  				return;
 2989  			}
 2990  		}
 2991  		break;
 2992  	default:
 2993  		job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
 2994  		break;
 2995  	}
 2996  }
 2997  
 2998  void
 2999  job_import_array(job_t j, const char *key, launch_data_t value)
 3000  {
 3001  	size_t i, value_cnt = launch_data_array_get_count(value);
 3002  
 3003  	switch (key[0]) {
 3004  	case 'p':
 3005  	case 'P':
 3006  		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
 3007  			return;
 3008  		}
 3009  		break;
 3010  	case 'l':
 3011  	case 'L':
 3012  		if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
 3013  			return;
 3014  		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
 3015  			return;
 3016  		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
 3017  			job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
 3018  			return;
 3019  		}
 3020  		break;
 3021  	case 'b':
 3022  	case 'B':
 3023  		if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
 3024  			if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
 3025  				j->j_binpref_cnt = value_cnt;
 3026  				for (i = 0; i < value_cnt; i++) {
 3027  					j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
 3028  				}
 3029  			}
 3030  		}
 3031  		break;
 3032  	case 's':
 3033  	case 'S':
 3034  		if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
 3035  			for (i = 0; i < value_cnt; i++) {
 3036  				calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
 3037  			}
 3038  		}
 3039  		break;
 3040  	default:
 3041  		job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
 3042  		break;
 3043  	}
 3044  }
 3045  
 3046  void
 3047  job_import_keys(launch_data_t obj, const char *key, void *context)
 3048  {
 3049  	job_t j = context;
 3050  	launch_data_type_t kind;
 3051  
 3052  	if (!obj) {
 3053  		launchd_syslog(LOG_ERR, "NULL object given to job_import_keys().");
 3054  		return;
 3055  	}
 3056  
 3057  	kind = launch_data_get_type(obj);
 3058  
 3059  	switch (kind) {
 3060  	case LAUNCH_DATA_BOOL:
 3061  		job_import_bool(j, key, launch_data_get_bool(obj));
 3062  		break;
 3063  	case LAUNCH_DATA_STRING:
 3064  		job_import_string(j, key, launch_data_get_string(obj));
 3065  		break;
 3066  	case LAUNCH_DATA_INTEGER:
 3067  		job_import_integer(j, key, launch_data_get_integer(obj));
 3068  		break;
 3069  	case LAUNCH_DATA_DICTIONARY:
 3070  		job_import_dictionary(j, key, obj);
 3071  		break;
 3072  	case LAUNCH_DATA_ARRAY:
 3073  		job_import_array(j, key, obj);
 3074  		break;
 3075  	case LAUNCH_DATA_OPAQUE:
 3076  		job_import_opaque(j, key, obj);
 3077  		break;
 3078  	default:
 3079  		job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
 3080  		break;
 3081  	}
 3082  }
 3083  
/* Import a job definition (a plist dictionary) into a job manager.
 *
 * Validates the payload (must be a dictionary with a string Label and
 * at least one of Program/ProgramArguments), resolves the requested
 * session, rejects duplicate or malformed labels, then creates the job
 * and feeds every plist key through job_import_keys().
 *
 * Returns the new job, or NULL with errno set (EINVAL, EPERM, EEXIST).
 * For XPC singleton managers an existing job may be returned with
 * errno = EEXIST; callers must treat that as success.
 */
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	if (!jobmgr_assumes(jm, pload != NULL)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
		errno = EINVAL;
		return NULL;
	}

	// A string-typed, non-NULL Label is mandatory.
	if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(label = launch_data_get_string(tmp)))) {
		errno = EINVAL;
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* Under the embedded "hand of god" bootstrap, only jobs whose
	 * UserName matches the god job's user may be imported. */
	if (unlikely(launchd_embedded_handofgod && _launchd_embedded_god)) {
		if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
			errno = EPERM;
			return NULL;
		}

		const char *username = NULL;
		if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
			username = launch_data_get_string(tmp);
		} else {
			errno = EPERM;
			return NULL;
		}

		if (!jobmgr_assumes(jm, _launchd_embedded_god->username != NULL && username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (unlikely(strcmp(_launchd_embedded_god->username, username) != 0)) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM))
		&& (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	// Build a NULL-terminated argv on the stack from ProgramArguments.
	int argc = 0;
	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		// Stack allocation: argv only needs to live until job_new().
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		argv[i] = NULL;
		argc = i;
	}

	if (!prog && argc == 0) {
		jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
		errno = EINVAL;
		return NULL;
	}

	/* Find the requested session. You cannot load services into XPC domains in
	 * this manner.
	 */
	launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	if (session) {
		jobmgr_t jmt = NULL;
		if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
			jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
			if (!jmt) {
				jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
			} else {
				// Retarget the import at the requested session's manager.
				jm = jmt;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
		}

		if (!jmt) {
			errno = EINVAL;
			return NULL;
		}
	}

	/* For legacy reasons, we have a global hash of all labels in all job
	 * managers. So rather than make it a global, we store it in the root job
	 * manager. But for an XPC domain, we store a local hash of all services in
	 * the domain.
	 */
	jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
	if (unlikely((j = job_find(where2look, label)) != NULL)) {
		if (jm->xpc_singleton) {
			/* There can (and probably will be) multiple attemtps to import the
			 * same XPC service from the same framework. This is okay. It's
			 * treated as a singleton, so just return the existing one so that
			 * it may be aliased into the requesting process' XPC domain.
			 */
			errno = EEXIST;
			return j;
		} else {
			/* If we're not a global XPC domain, then it's an error to try
			 * importing the same job/service multiple times.
			 */
			errno = EEXIST;
			return NULL;
		}
	} else if (unlikely(!jobmgr_label_test(where2look, label))) {
		errno = EINVAL;
		return NULL;
	}
	jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);

	if (likely(j = job_new(jm, label, prog, argv))) {
#if TARGET_OS_EMBEDDED
		job_apply_defaults(j);
#endif
		// Fan out every plist key to the per-type importers.
		launch_data_dict_iterate(pload, job_import_keys, j);
		if (!uuid_is_null(j->expected_audit_uuid)) {
			uuid_string_t uuid_str;
			uuid_unparse(j->expected_audit_uuid, uuid_str);
			job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
			LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
			// NOTE: errno doubles as a status hint for the caller here.
			errno = ENEEDAUTH;
		} else {
			job_log(j, LOG_DEBUG, "No security session specified.");
			j->asport = MACH_PORT_NULL;
		}

		if (pid1_magic && !jm->parentmgr) {
			/* Workaround reentrancy in CF. We don't make this a global variable
			 * because we don't want per-user launchd's to inherit it. So we
			 * just set it for every job that we import into the System session.
			 *
			 * See <rdar://problem/9468837>.
			 */
			envitem_new(j, "__CF_USER_TEXT_ENCODING", "0x0:0:0", false);
		}

		if (j->event_monitor) {
			eventsystem_ping();
		}

#if TARGET_OS_EMBEDDED
		/* SpringBoard and backboardd must run at elevated priority.
		 *
		 * See <rdar://problem/9539873> and <rdar://problem/10984383>.
		 */
		if (j->embedded_god || j->embedded_home) {
			j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
		}
#endif
	}

	return j;
}
 3280  
 3281  bool
 3282  jobmgr_label_test(jobmgr_t jm, const char *str)
 3283  {
 3284  	const char *ptr;
 3285  
 3286  	if (str[0] == '\0') {
 3287  		jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
 3288  		return false;
 3289  	}
 3290  
 3291  	for (ptr = str; *ptr; ptr++) {
 3292  		if (iscntrl(*ptr)) {
 3293  			jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
 3294  			return false;
 3295  		}
 3296  	}
 3297  
 3298  	if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0)
 3299  		|| (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
 3300  		jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
 3301  		return false;
 3302  	}
 3303  
 3304  	return true;
 3305  }
 3306  
 3307  job_t 
 3308  job_find(jobmgr_t jm, const char *label)
 3309  {
 3310  	job_t ji;
 3311  
 3312  	if (!jm) {
 3313  		jm = root_jobmgr;
 3314  	}
 3315  
 3316  	LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
 3317  		if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
 3318  			// 5351245 and 5488633 respectively
 3319  			continue;
 3320  		}
 3321  
 3322  		if (strcmp(ji->label, label) == 0) {
 3323  			return ji;
 3324  		}
 3325  	}
 3326  
 3327  	errno = ESRCH;
 3328  	return NULL;
 3329  }
 3330  
 3331  // Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid().
 3332  job_t
 3333  jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
 3334  {
 3335  	job_t ji = NULL;
 3336  	LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
 3337  		if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay))) {
 3338  			return ji;
 3339  		}
 3340  	}
 3341  
 3342  	jobmgr_t jmi = NULL;
 3343  	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
 3344  		if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
 3345  			break;
 3346  		}
 3347  	}
 3348  
 3349  	return ji;
 3350  }
 3351  
 3352  job_t
 3353  jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
 3354  {
 3355  	job_t ji;
 3356  
 3357  	LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
 3358  		if (ji->p == p) {
 3359  			return ji;
 3360  		}
 3361  	}
 3362  
 3363  	return create_anon ? job_new_anonymous(jm, p) : NULL;
 3364  }
 3365  
 3366  job_t
 3367  managed_job(pid_t p)
 3368  {
 3369  	job_t ji;
 3370  
 3371  	LIST_FOREACH(ji, &managed_actives[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
 3372  		if (ji->p == p) {
 3373  			return ji;
 3374  		}
 3375  	}
 3376  
 3377  	return NULL;
 3378  }
 3379  
 3380  job_t 
 3381  job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
 3382  {
 3383  	jobmgr_t jmi;
 3384  	job_t ji;
 3385  
 3386  	if (jm->jm_port == mport) {
 3387  		return jobmgr_find_by_pid(jm, upid, true);
 3388  	}
 3389  
 3390  	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
 3391  		job_t jr;
 3392  
 3393  		if ((jr = job_mig_intran2(jmi, mport, upid))) {
 3394  			return jr;
 3395  		}
 3396  	}
 3397  
 3398  	LIST_FOREACH(ji, &jm->jobs, sle) {
 3399  		if (ji->j_port == mport) {
 3400  			return ji;
 3401  		}
 3402  	}
 3403  
 3404  	return NULL;
 3405  }
 3406  
 3407  job_t 
 3408  job_mig_intran(mach_port_t p)
 3409  {
 3410  	struct ldcred *ldc = runtime_get_caller_creds();
 3411  	job_t jr;
 3412  
 3413  	jr = job_mig_intran2(root_jobmgr, p, ldc->pid);
 3414  
 3415  	if (!jr) {
 3416  		struct proc_bsdshortinfo proc;
 3417  		if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
 3418  			if (errno != ESRCH) {
 3419  				(void)jobmgr_assumes_zero(root_jobmgr, errno);
 3420  			} else {
 3421  				jobmgr_log(root_jobmgr, LOG_ERR, "%s[%i] disappeared out from under us (UID: %u EUID: %u)", proc.pbsi_comm, ldc->pid, ldc->uid, ldc->euid);
 3422  			}
 3423  		}
 3424  	}
 3425  
 3426  	return jr;
 3427  }
 3428  
 3429  job_t
 3430  job_find_by_service_port(mach_port_t p)
 3431  {
 3432  	struct machservice *ms;
 3433  
 3434  	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
 3435  		if (ms->recv && (ms->port == p)) {
 3436  			return ms->job;
 3437  		}
 3438  	}
 3439  
 3440  	return NULL;
 3441  }
 3442  
/* MIG destructor hook, run after a MIG routine returns.
 *
 * Performs deferred unloads: a routine may set unload_at_mig_return
 * instead of removing the job mid-call. The workaround_5477111 global
 * guards against acting on a job that was invalidated during the call,
 * and is cleared unconditionally on every pass.
 */
void
job_mig_destructor(job_t j)
{
	/* The job can go invalid before this point.
	 *
	 * <rdar://problem/5477111>
	 */
	if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
		job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
		job_remove(j);
	}

	workaround_5477111 = NULL;

	// Opportunistic consistency check of the calendar-interval list.
	calendarinterval_sanity_check();
}
 3459  
 3460  void
 3461  job_export_all2(jobmgr_t jm, launch_data_t where)
 3462  {
 3463  	jobmgr_t jmi;
 3464  	job_t ji;
 3465  
 3466  	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
 3467  		job_export_all2(jmi, where);
 3468  	}
 3469  
 3470  	LIST_FOREACH(ji, &jm->jobs, sle) {
 3471  		launch_data_t tmp;
 3472  
 3473  		if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
 3474  			launch_data_dict_insert(where, tmp, ji->label);
 3475  		}
 3476  	}
 3477  }
 3478  
 3479  launch_data_t
 3480  job_export_all(void)
 3481  {
 3482  	launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
 3483  
 3484  	if (resp != NULL) {
 3485  		job_export_all2(root_jobmgr, resp);
 3486  	} else {
 3487  		(void)os_assumes_zero(errno);
 3488  	}
 3489  
 3490  	return resp;
 3491  }
 3492  
/* Log any surviving processes that still share a dead job's process
 * group (Apple-internal builds only). Helps spot daemons that abandon
 * children. Called from job_reap() while the PGID is still valid.
 */
void
job_log_stray_pg(job_t j)
{
	pid_t *pids = NULL;
	// Size the buffer for the kernel's maximum process count.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	if (job_assumes_zero_p(j, (kp_cnt = proc_listpgrppids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = pids[i];
		if (p_i == j->p) {
			// The dead job itself.
			continue;
		} else if (p_i == 0 || p_i == 1) {
			// Never flag the kernel (0) or launchd/init (1).
			continue;
		}

		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			// 0 means the query failed; ESRCH just means it already exited.
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		pid_t pp_i = proc.pbsi_ppid;
		const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
		const char *n = proc.pbsi_comm;

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
	}

out:
	free(pids);
}
 3539  
 3540  #if HAVE_SYSTEMSTATS
/* EVFILT_TIMER callback (armed in systemstats_is_enabled()): log
 * performance statistics for every job under the root manager. */
static void
systemstats_timer_callback(void)
{
	jobmgr_log_perf_statistics(root_jobmgr, true);
}
 3546  
/* Lazily initialize the systemstats writer and report whether it is
 * enabled.
 *
 * The static flag stays false on failure, so initialization is retried
 * on every call until it succeeds. On success in the PID-1 instance
 * (with a nonzero configured interval), a repeating timer is armed to
 * drive systemstats_timer_callback().
 */
static bool
systemstats_is_enabled(void)
{
	static bool systemstats_enabled;

	if (!systemstats_enabled) {
		char *store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_LOGS, NULL);
		systemstats_enabled = systemstats_init(SYSTEMSTATS_WRITER_launchd, store);
		free(store);

		uint64_t interval;
		interval = systemstats_get_log_interval(SYSTEMSTATS_WRITER_launchd);

		if (pid1_magic && systemstats_enabled && interval) {
			// The callback's address doubles as the kevent timer ident.
			jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)systemstats_timer_callback, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, interval, root_jobmgr));
		}
	}

	return systemstats_enabled;
}
 3567  #endif // HAVE_SYSTEMSTATS
 3568  
/* Reap a dead job: collect its exit status, log how and why it died, tear
 * down the PID-keyed bookkeeping, deliver the status to any waiters, and
 * reset per-run state so the job can be dispatched again. Called from the
 * NOTE_EXIT kevent path (job_callback_proc) or synchronously from
 * job_start() when attaching the kevent fails.
 */
void
job_reap(job_t j)
{
	/* PID 1's root bootstrapper gets extra console logging if it crashed
	 * (see the WIFSIGNALED block below).
	 */
	bool is_system_bootstrapper = ((j->is_bootstrapper && pid1_magic) && !j->mgr->parentmgr);

	job_log(j, LOG_DEBUG, "Reaping");

	if (unlikely(j->weird_bootstrap)) {
		// Hand the "weird bootstrap" role back before the job goes away.
		int64_t junk = 0;
		job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
	}

	// Close the fork-stall fd if the child died before being uncorked.
	if (j->fork_fd) {
		(void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
		j->fork_fd = 0;
	}

	/* Query the kernel's dirty-tracking state so we know whether an
	 * idle-exit job died clean or dirty.
	 */
	bool was_dirty = false;
	if (!(j->anonymous || j->implicit_reap)) {
		uint32_t flags = 0;
		(void)job_assumes_zero(j, proc_get_dirty(j->p, &flags));

		j->idle_exit = (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT);
		was_dirty = (flags & PROC_DIRTY_IS_DIRTY);

		job_log(j, LOG_DEBUG, "%sob exited %s.", j->idle_exit ? "Idle-exit j" : "J", was_dirty ? "while dirty" : "cleanly");
	}

	if (j->idle_exit && was_dirty) {
		if (j->jettisoned) {
			// Jettisoned while dirty: it still had work pending, respawn now.
			job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned while dirty. Will respawn immediately.");
			j->unthrottle = true;
			j->start_pending = true;
		} else {
			job_log(j, LOG_INFO, "Idle-exit job exited while dirty.");
		}
	} else if (j->idle_exit && j->jettisoned) {
		/* If an idle-exit job is jettisoned, then we shouldn't throttle its
		 * next respawn because it could not help when it exited. If it ran for
		 * the minimum runtime, then this doesn't really matter. If it ran for
		 * less than the minimum runtime, it will not be throttled.
		 *
		 * <rdar://problem/12098667>
		 */
		job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned. Will bypass throttle interval for next on-demand launch.");
		j->unthrottle = true;
	}

	if (j->anonymous) {
		// We never wait(2) on anonymous processes; there is no status to keep.
		j->last_exit_status = 0;
	} else {
		// Account this run's wall time toward the job's lifetime totals.
		uint64_t rt = runtime_get_nanoseconds_since(j->start_time);
		j->trt += rt;

		job_log(j, LOG_PERF, "Last instance wall time: %06f", (double)rt / (double)NSEC_PER_SEC);
		j->nruns++;

		/* The job is dead. While the PID/PGID is still known to be valid, try
		 * to kill abandoned descendant processes.
		 */
		job_log_stray_pg(j);
		if (!j->abandon_pg) {
			if (unlikely(killpg2(j->p, SIGTERM) == -1 && errno != ESRCH)) {
				job_log(j, LOG_APPLEONLY, "Bug: 5487498");
			}
		}

		int r = -1;
		if (!j->implicit_reap) {
			/* If the shutdown monitor has suspended a task and not resumed it
			 * resumed it before exiting, the kernel will not clean up after the
			 * shutdown monitor. It will, instead, leave the task suspended and
			 * not process any pending signals on the event loop for the task.
			 *
			 * There are a variety of other kernel bugs that could prevent a
			 * process from exiting, usually having to do with faulty hardware
			 * or talking to misbehaving drivers that mark a thread as
			 * uninterruptible and deadlock/hang before unmarking it as such. So
			 * we have to work around that too.
			 *
			 * See <rdar://problem/9284889&9359725>.
			 */
			if (j->workaround9359725) {
				job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
				j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
			} else {
#if HAVE_SYSTEMSTATS
				// Snapshot resource usage before wait4() releases the PID.
				int r2;
				struct rusage_info_v1 ri;
				r2 = job_assumes_zero(j, proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)&ri));
#endif
				if ((r = wait4(j->p, &j->last_exit_status, 0, NULL)) == -1) {
					job_log(j, LOG_ERR, "Reap failed. Assuming job exited: %d: %s", errno, strerror(errno));
					j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
				}

				if (j->idle_exit && j->jettisoned) {
					// Treat idle-exit jettisons as successful exit.
					//
					// <rdar://problem/13338973>
					(void)job_assumes_zero(j, WTERMSIG(j->last_exit_status));
					j->last_exit_status = W_EXITCODE(0, 0);
				}
#if HAVE_SYSTEMSTATS
				if (r2 == 0) {
					job_log_perf_statistics(j, &ri, j->last_exit_status);
				}
#endif
			}
		} else {
			job_log(j, LOG_INFO, "Job was implicitly reaped by the kernel.");
		}
	}

	// Disarm the exit-timeout timer now that the process is gone.
	if (j->exit_timeout) {
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}

	// Unlink from the PID-keyed hash tables (see job_start() for insertion).
	LIST_REMOVE(j, pid_hash_sle);
	if (!j->anonymous) {
		LIST_REMOVE(j, global_pid_hash_sle);
	}

	if (j->sent_signal_time) {
		// Report how long the process took to die after our first signal.
		uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);

		td_sec = td / NSEC_PER_SEC;
		td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;

		job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
	}

	int exit_status = WEXITSTATUS(j->last_exit_status);
	if (WIFEXITED(j->last_exit_status) && exit_status != 0) {
		if (!j->did_exec && _launchd_support_system) {
			/* The job never exec(3)ed. For path-related failures, register an
			 * external event so we retry when the executable shows up.
			 */
			xpc_object_t event = NULL;
			switch (exit_status) {
			case ENOENT:
			case ENOTDIR:
			case ESRCH:
				job_log(j, LOG_NOTICE, "Job failed to exec(3). Setting up event to tell us when to try again: %d: %s", exit_status, strerror(exit_status));
				event = xpc_dictionary_create(NULL, NULL, 0);
				xpc_dictionary_set_string(event, "Executable", j->prog ? j->prog : j->argv[0]);
				if (j->mach_uid) {
					xpc_dictionary_set_uint64(event, "UID", j->mach_uid);
				} else if (j->username) {
					xpc_dictionary_set_string(event, "UserName", j->username);
				}

				if (j->groupname) {
					xpc_dictionary_set_string(event, "GroupName", j->groupname);
				}

				(void)externalevent_new(j, _launchd_support_system, j->label, event, 0);
				xpc_release(event);

				j->waiting4ok = true;
				/* NOTE(review): there is no `break` here, so the cases above
				 * fall through and also emit the "weird reason" message
				 * below. Looks unintentional -- confirm before changing.
				 */
			default:
				job_log(j, LOG_NOTICE, "Job failed to exec(3) for weird reason: %d", exit_status);
			}
		} else {
			int level = LOG_INFO;
			/* NOTE(review): exit_status != 0 is guaranteed by the enclosing
			 * condition, so this always selects LOG_ERR.
			 */
			if (exit_status != 0) {
				level = LOG_ERR;
			}

			job_log(j, level, "Exited with code: %d", exit_status);
		}
	}

	if (WIFSIGNALED(j->last_exit_status)) {
		int s = WTERMSIG(j->last_exit_status);
		// SIGKILL/SIGTERM is normal for a job we did not explicitly stop.
		if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
			job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
		} else if (!(j->stopped || j->clean_kill || j->jettisoned)) {
			switch (s) {
			// Signals which indicate a crash.
			case SIGILL:
			case SIGABRT:
			case SIGFPE:
			case SIGBUS:
			case SIGSEGV:
			case SIGSYS:
			/* If the kernel has posted NOTE_EXIT and the signal sent to the process was
			 * SIGTRAP, assume that it's a crash.
			 */
			case SIGTRAP:
				j->crashed = true;
				job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
				break;
			default:
				job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
				break;
			}

			if (is_system_bootstrapper && j->crashed) {
				job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
			}
		}
	}

	j->reaped = true;

	/* On crash or exec failure, drain and/or reset this job's Mach service
	 * ports so clients do not wedge on a dead service.
	 */
	struct machservice *msi = NULL;
	if (j->crashed || !(j->did_exec || j->anonymous)) {
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
				machservice_drain_port(msi);
			}

			if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
				machservice_resetport(j, msi);
			}
		}
	}

	/* HACK: Essentially duplicating the logic directly above. But this has
	 * gotten really hairy, and I don't want to try consolidating it right now.
	 */
	if (j->xpc_service && !j->xpcproxy_did_exec) {
		job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
		SLIST_FOREACH(msi, &j->machservices, sle) {
			/* Drain the messages but do not reset the port. If xpcproxy could
			 * not exec(3), then we don't want to continue trying, since there
			 * is very likely a serious configuration error with the service.
			 *
			 * The above comment is weird. I originally said we should drain
			 * messages but not reset the port, but that's exactly what we do
			 * below, and I'm not sure which is the mistake, the comment or the
			 * actual behavior.
			 *
			 * Since it's always been this way, I'll assume that the comment is
			 * incorrect, but I'll leave it in place just to remind myself to
			 * actually look into it at some point.
			 *
			 * <rdar://problem/8986802>
			 */
			if (msi->upfront && job_assumes(j, !msi->isActive)) {
				machservice_resetport(j, msi);
			}
		}
	}

	/* Force-resume any per-user launchds this job suspended and never
	 * resumed before dying.
	 */
	struct suspended_peruser *spi = NULL;
	while ((spi = LIST_FIRST(&j->suspended_perusers))) {
		job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
		spi->j->peruser_suspend_count--;
		if (spi->j->peruser_suspend_count == 0) {
			job_dispatch(spi->j, false);
		}
		LIST_REMOVE(spi, sle);
		free(spi);
	}

	// Deliver the exit status to a waiting helper, if one registered.
	if (j->exit_status_dest) {
		errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
		if (errno && errno != MACH_SEND_INVALID_DEST) {
			(void)job_assumes_zero(j, errno);
		}

		j->exit_status_dest = MACH_PORT_NULL;
	}

	if (j->spawn_reply_port) {
		/* If the child never called exec(3), we must send a spawn() reply so
		 * that the requestor can get exit status from it. If we fail to send
		 * the reply for some reason, we have to deallocate the exit status port
		 * ourselves.
		 */
		kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
		if (kr) {
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			}

			(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
		}

		j->exit_status_port = MACH_PORT_NULL;
		j->spawn_reply_port = MACH_PORT_NULL;
	}

	// Update child accounting and the runtime busy refcount.
	if (j->anonymous) {
		total_anon_children--;
		if (j->holds_ref) {
			job_log(j, LOG_PERF, "Anonymous job exited holding reference.");
			runtime_del_ref();
		}
	} else {
		job_log(j, LOG_PERF, "Job exited.");
		runtime_del_ref();
		total_children--;
	}

	if (j->has_console) {
		// Clear the PID recorded in job_start() for the console-owning job.
		launchd_wsp = 0;
	}

	if (j->shutdown_monitor) {
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
		_launchd_shutdown_monitor = NULL;
		j->shutdown_monitor = false;
	}

	if (!j->anonymous) {
		j->mgr->normal_active_cnt--;
	}
	// Reset per-run state so the job can be started again.
	j->sent_signal_time = 0;
	j->sent_sigkill = false;
	j->clean_kill = false;
	j->event_monitor_ready2signal = false;
	j->p = 0;
	j->uniqueid = 0;
}
 3883  
 3884  void
 3885  jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
 3886  {
 3887  	jobmgr_t jmi, jmn;
 3888  	job_t ji, jn;
 3889  
 3890  	if (jm->shutting_down) {
 3891  		return;
 3892  	}
 3893  
 3894  	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
 3895  		jobmgr_dispatch_all(jmi, newmounthack);
 3896  	}
 3897  
 3898  	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
 3899  		if (newmounthack && ji->start_on_mount) {
 3900  			ji->start_pending = true;
 3901  		}
 3902  
 3903  		job_dispatch(ji, false);
 3904  	}
 3905  }
 3906  
 3907  void
 3908  job_dispatch_curious_jobs(job_t j)
 3909  {	
 3910  	job_t ji = NULL, jt = NULL;
 3911  	SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
 3912  		struct semaphoreitem *si = NULL;
 3913  		SLIST_FOREACH(si, &ji->semaphores, sle) {			
 3914  			if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
 3915  				continue;
 3916  			}
 3917  
 3918  			if (strcmp(si->what, j->label) == 0) {
 3919  				job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);
 3920  
 3921  				if (!ji->removing) {
 3922  					job_dispatch(ji, false);
 3923  				} else {
 3924  					job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
 3925  				}
 3926  
 3927  				/* ji could be removed here, so don't do anything with it or its semaphores
 3928  				 * after this point.
 3929  				 */
 3930  				break;
 3931  			}
 3932  		}
 3933  	}
 3934  }
 3935  
/* Central decision point of a job's lifecycle: remove it if it is useless,
 * start it if demanded (`kickstart`) or its keepalive criteria say so,
 * otherwise arm its watch sources. Returns j on success; returns NULL when
 * the job was not dispatched -- and in the job_useless() path the job has
 * been freed, so callers must not touch j after a NULL return.
 */
job_t
job_dispatch(job_t j, bool kickstart)
{
	// Don't dispatch a job if it has no audit session set.
	if (!uuid_is_null(j->expected_audit_uuid)) {
		job_log(j, LOG_DEBUG, "Job is still awaiting its audit session UUID. Not dispatching.");
		return NULL;
	}
	if (j->alias) {
		job_log(j, LOG_DEBUG, "Job is an alias. Not dispatching.");
		return NULL;
	}

	// Set by job_reap() when exec(3) failed; an external event re-enables it.
	if (j->waiting4ok) {
		job_log(j, LOG_DEBUG, "Job cannot exec(3). Not dispatching.");
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* Under "hand of god" mode only jobs owned by the same user as the god
	 * job may be dispatched.
	 */
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!job_assumes(j, _launchd_embedded_god->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	/*
	 * The whole job removal logic needs to be consolidated. The fact that
	 * a job can be removed from just about anywhere makes it easy to have
	 * stale pointers left behind somewhere on the stack that might get
	 * used after the deallocation. In particular, during job iteration.
	 *
	 * This is a classic example. The act of dispatching a job may delete it.
	 */
	if (!job_active(j)) {
		if (job_useless(j)) {
			job_log(j, LOG_DEBUG, "Job is useless. Removing.");
			job_remove(j);
			return NULL;
		}
		if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
			job_log(j, LOG_DEBUG, "Per-user launchd is suspended. Not dispatching.");
			return NULL;
		}

		if (kickstart || job_keepalive(j)) {
			job_log(j, LOG_DEBUG, "%starting job", kickstart ? "Kicks" : "S");
			job_start(j);
		} else {
			job_log(j, LOG_DEBUG, "Watching job.");
			job_watch(j);
		}
	} else {
		// job_active() returns a human-readable reason string when active.
		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job: %s.", job_active(j));
	}

	return j;
}
 4003  
 4004  void
 4005  job_kill(job_t j)
 4006  {
 4007  	if (unlikely(!j->p || j->anonymous)) {
 4008  		return;
 4009  	}
 4010  
 4011  	(void)job_assumes_zero_p(j, kill2(j->p, SIGKILL));
 4012  
 4013  	j->sent_sigkill = true;
 4014  	(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j));
 4015  
 4016  	job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
 4017  }
 4018  
 4019  void
 4020  job_open_shutdown_transaction(job_t j)
 4021  {
 4022  	int rv = proc_set_dirty(j->p, true);
 4023  	if (rv != 0) {
 4024  		job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
 4025  		j->dirty_at_shutdown = false;
 4026  	}
 4027  }
 4028  
 4029  void
 4030  job_close_shutdown_transaction(job_t j)
 4031  {
 4032  	if (j->dirty_at_shutdown) {
 4033  		job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
 4034  		(void)job_assumes_zero(j, proc_set_dirty(j->p, false));
 4035  		j->dirty_at_shutdown = false;
 4036  	}
 4037  }
 4038  
/* Debugging aid (Apple-internal builds only): log every child of this job
 * that fork(2)ed but has not yet exec(3)ed, to nudge authors toward
 * posix_spawn. Skipped for anonymous and per-user jobs.
 */
void
job_log_children_without_exec(job_t j)
{
	pid_t *pids = NULL;
	// Size the PID buffer for the kernel's process limit.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal || j->anonymous || j->per_user) {
		return;
	}

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	/* NOTE(review): proc_listchildpids() appears to return a count (stored
	 * in kp_cnt) or -1 on error; job_assumes_zero_p presumably logs on a
	 * negative return -- confirm against the assumes macros.
	 */
	if (job_assumes_zero_p(j, (kp_cnt = proc_listchildpids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			// ESRCH just means the child exited between listing and query.
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}
		// P_EXEC is set once the child has exec(3)ed; those are fine.
		if (proc.pbsi_flags & P_EXEC) {
			continue;
		}

		job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
	}

out:
	free(pids);
}
 4075  
/* Handle an EVFILT_PROC kevent for a job: relabel anonymous jobs on exec,
 * deliver the spawn reply and update did_exec bookkeeping on NOTE_EXEC,
 * log stray forks on NOTE_FORK, and reap/redispatch on NOTE_EXIT.
 */
void
job_callback_proc(job_t j, struct kevent *kev)
{
	bool program_changed = false;
	int fflags = kev->fflags;

	job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
	log_kevent_struct(LOG_DEBUG, kev, 0);

	if (fflags & NOTE_EXEC) {
		program_changed = true;

		if (j->anonymous) {
			// Re-derive the anonymous job's label from the new image's name.
			struct proc_bsdshortinfo proc;
			if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
				char newlabel[1000];

				snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);

				job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);

				LIST_REMOVE(j, label_hash_sle);
				/* NOTE(review): strcpy() assumes the existing label buffer
				 * is large enough for newlabel; presumably anonymous-job
				 * labels are allocated with headroom for this rename --
				 * confirm at the label allocation site.
				 */
				strcpy((char *)j->label, newlabel);

				// Re-hash under the appropriate manager's label table.
				jobmgr_t where2put = root_jobmgr;
				if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
					where2put = j->mgr;
				}
				LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
			} else if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
		} else {
			if (j->spawn_reply_port) {
				/* The spawn requestor can be answered now that exec(3)
				 * happened. On send failure we still own the exit-status
				 * port and must close the receive right ourselves.
				 */
				errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
				if (errno) {
					if (errno != MACH_SEND_INVALID_DEST) {
						(void)job_assumes_zero(j, errno);
					}
					(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
				}

				j->spawn_reply_port = MACH_PORT_NULL;
				j->exit_status_port = MACH_PORT_NULL;
			}

			/* For XPC services a second NOTE_EXEC means xpcproxy itself
			 * exec(3)ed the service binary.
			 */
			if (j->xpc_service && j->did_exec) {
				j->xpcproxy_did_exec = true;
			}

			j->did_exec = true;
			job_log(j, LOG_DEBUG, "Program changed");
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
		job_log_children_without_exec(j);
	}

	if (fflags & NOTE_EXIT) {
		// With NOTE_EXIT_DETAIL, kev->data carries exit-reason flags.
		if (kev->data & NOTE_EXIT_DECRYPTFAIL) {
			j->fpfail = true;
			job_log(j, LOG_WARNING, "FairPlay decryption failed on binary for job.");
		} else if (kev->data & NOTE_EXIT_MEMORY) {
			j->jettisoned = true;
			job_log(j, LOG_INFO, "Job was killed due to memory pressure.");
		}

		job_reap(j);

		if (j->anonymous) {
			// Anonymous jobs exist only while their process does.
			job_remove(j);
			j = NULL;
		} else {
			// Clean up anyone waiting to attach to this now-dead PID.
			struct waiting4attach *w4ai = NULL;
			struct waiting4attach *w4ait = NULL;
			LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
				if (w4ai->dest == (pid_t)kev->ident) {
					waiting4attach_delete(j->mgr, w4ai);
				}
			}

			(void)job_dispatch(j, false);
		}
	}
}
 4163  
/* Handle an EVFILT_TIMER kevent for a job. The `ident` pointer identifies
 * which timer fired: the job itself (respawn throttle / fork retry, see
 * job_start()), its semaphore list, its start interval, or its exit
 * timeout after a signal was sent.
 */
void
job_callback_timer(job_t j, void *ident)
{
	if (j == ident) {
		// Throttle/retry timer: force a start attempt now.
		job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		// Re-evaluate keepalive criteria.
		job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		// Periodic StartInterval fire.
		job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (!job_assumes(j, j->p != 0)) {
			return;
		}

		if (j->sent_sigkill) {
			/* SIGKILL did not take effect within the timeout: the process is
			 * wedged in the kernel, so simulate its exit (see the
			 * workaround9359725 handling in job_reap()).
			 */
			uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);

			td /= NSEC_PER_SEC;
			td -= j->clean_kill ? 0 : j->exit_timeout;

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
			j->workaround9359725 = true;

			// This basically has to be done off the main thread. We have no
			// mechanism for draining the main queue in our run loop (like CF
			// does), and the kevent mechanism wants an object to be associated
			// as the callback. So we just create a dispatch source and reap the
			// errant PID whenever we can. Note that it is not safe for us to do
			// any logging in this block, since logging requires exclusive
			// access to global data structures that is only protected by the
			// main thread.
			dispatch_source_t hack_13570156 = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, j->p, DISPATCH_PROC_EXIT, dispatch_get_global_queue(0, 0));
			dispatch_source_set_event_handler(hack_13570156, ^{
				pid_t pid = (pid_t)dispatch_source_get_handle(hack_13570156);

				int status = 0;
				(void)waitpid(pid, &status, 0);
				dispatch_release(hack_13570156);
			});

			dispatch_resume(hack_13570156);

			if (launchd_trap_sigkill_bugs) {
				job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			/* Fabricate a NOTE_EXIT kevent so the normal reap path runs even
			 * though the kernel never posted one.
			 */
			struct kevent bogus_exit;
			EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
			jobmgr_callback(j->mgr, &bogus_exit);
		} else {
			if (unlikely(j->debug_before_kill)) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
			job_kill(j);
		}
	} else {
		job_log(j, LOG_ERR, "Unrecognized job timer callback: %p", ident);
	}
}
 4231  
 4232  void
 4233  job_callback_read(job_t j, int ident)
 4234  {
 4235  	if (ident == j->stdin_fd) {
 4236  		job_dispatch(j, true);
 4237  	} else {
 4238  		socketgroup_callback(j);
 4239  	}
 4240  }
 4241  
 4242  void
 4243  jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
 4244  {
 4245  	jobmgr_t jmi;
 4246  	job_t j;
 4247  
 4248  	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
 4249  		jobmgr_reap_bulk(jmi, kev);
 4250  	}
 4251  
 4252  	if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
 4253  		kev->udata = j;
 4254  		job_callback(j, kev);
 4255  	}
 4256  }
 4257  
/* Top-level kevent callback for a job manager: routes process exits,
 * control signals, filesystem events, timers, and vnode events to the
 * appropriate handlers.
 */
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	jobmgr_t jm = obj;

#if TARGET_OS_EMBEDDED
	int flag2check = VQ_MOUNT;
#else
	int flag2check = VQ_UPDATE;
#endif

	switch (kev->filter) {
	case EVFILT_PROC:
		// A tracked process exited: reap it, then GC empty managers.
		jobmgr_reap_bulk(jm, kev);
		root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:			
			jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		case SIGUSR2:
			// Turn on all logging.
			launchd_log_perf = true;
			launchd_log_debug = true;
			launchd_log_shutdown = true;
			/* Hopefully /var is available by this point. If not, uh, oh well.
			 * It's just a debugging facility.
			 */
			return jobmgr_log_perf_statistics(jm, false);
		case SIGINFO:
			return jobmgr_log_perf_statistics(jm, true);
		default:
			jobmgr_log(jm, LOG_ERR, "Unrecognized signal: %lu: %s", kev->ident, strsignal(kev->ident));
		}
		break;
	case EVFILT_FS:
		/* flag2check (VQ_UPDATE on desktop, VQ_MOUNT on embedded) marks the
		 * point at which /var/log may have become writable.
		 */
		if (kev->fflags & flag2check) {
			if (!launchd_var_available) {
				struct stat sb;
				if (stat("/var/log", &sb) == 0 && (sb.st_mode & S_IWUSR)) {
					launchd_var_available = true;
				}
			}
		} else if (kev->fflags & VQ_MOUNT) {
			// A filesystem was mounted: kick jobs that start on mount.
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		// Disambiguate the timer by the address it was registered with.
		if (kev->ident == (uintptr_t)&sorted_calendar_events) {
			calendarinterval_callback();
		} else if (kev->ident == (uintptr_t)jm) {
			jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
			jobmgr_still_alive_with_check(jm);
		} else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
			jobmgr_do_garbage_collection(jm);
		} else if (kev->ident == (uintptr_t)&launchd_runtime_busy_time) {
			jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
			if (jobmgr_assumes_zero(jm, runtime_busy_cnt) == 0) {
				return launchd_shutdown();
			}
#if HAVE_SYSTEMSTATS
		} else if (kev->ident == (uintptr_t)systemstats_timer_callback) {
			systemstats_timer_callback();
#endif
		}
		break;
	case EVFILT_VNODE:
		if (kev->ident == (uintptr_t)s_no_hang_fd) {
			// /dev/autofs_nowait showed up: swap our watch fd over to it.
			int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
			if (unlikely(_no_hang_fd != -1)) {
				jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
				(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL));
				(void)jobmgr_assumes_zero_p(root_jobmgr, runtime_close(s_no_hang_fd));
				s_no_hang_fd = _fd(_no_hang_fd);
			}
		} else if (pid1_magic && launchd_console && kev->ident == (uintptr_t)fileno(launchd_console)) {
			// Vnode event on the console (e.g. revocation): reopen it.
			int cfd = -1;
			if (jobmgr_assumes_zero_p(jm, cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1) {
				_fd(cfd);
				if (!(launchd_console = fdopen(cfd, "w"))) {
					(void)jobmgr_assumes_zero(jm, errno);
					(void)close(cfd);
				}
			}
		}
		break;
	default:
		jobmgr_log(jm, LOG_ERR, "Unrecognized kevent filter: %hd", kev->filter);
	}
}
 4352  
 4353  void
 4354  job_callback(void *obj, struct kevent *kev)
 4355  {
 4356  	job_t j = obj;
 4357  
 4358  	job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
 4359  
 4360  	switch (kev->filter) {
 4361  	case EVFILT_PROC:
 4362  		return job_callback_proc(j, kev);
 4363  	case EVFILT_TIMER:
 4364  		return job_callback_timer(j, (void *) kev->ident);
 4365  	case EVFILT_READ:
 4366  		return job_callback_read(j, (int) kev->ident);
 4367  	case EVFILT_MACHPORT:
 4368  		return (void)job_dispatch(j, true);
 4369  	default:
 4370  		job_log(j, LOG_ERR, "Unrecognized job callback filter: %hd", kev->filter);
 4371  	}
 4372  }
 4373  
/* Fork and launch a job: enforce the respawn throttle, create the in-band
 * IPC and exec-stall socket pairs, fork(2), reset the parent-side per-run
 * state, attach the EVFILT_PROC kevent, and uncork the child.
 */
void
job_start(job_t j)
{
	uint64_t td;
	// spair: in-band IPC channel handed to the child via LAUNCHD_TRUSTED_FD_ENV.
	int spair[2];
	// execspair: stalls the child until the parent has attached its kevent.
	int execspair[2];
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXIT_DETAIL|NOTE_EXITSTATUS;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (unlikely(job_active(j))) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	if (!LIST_EMPTY(&j->mgr->attaches)) {
		job_log(j, LOG_DEBUG, "Looking for attachments for job: %s", j->label);
		(void)waiting4attach_find(j->mgr, j);
	}

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of the wall clock
	 * wherever possible.
	 */
	td = runtime_get_nanoseconds_since(j->start_time);
	td /= NSEC_PER_SEC;

	/* Throttle rapid respawns. j->unthrottle is set by job_reap() when a
	 * jettisoned idle-exit job earned a free pass.
	 */
	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat && !j->unthrottle) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;
		/* We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */
		job_log(j, LOG_NOTICE, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j));
		job_ignore(j);
		return;
	}

	// Legacy Mach jobs never get the in-band IPC socket.
	if (likely(!j->legacy_mach_job)) {
		sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_god;
	}

	if (sipc) {
		(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair));
	}

	(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair));

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		// fork failed: retry in one second and release both socket pairs.
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j));
		job_ignore(j);

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			(void)job_assumes_zero(j, runtime_close(spair[1]));
		}
		break;
	case 0:
		// Child side.
		if (unlikely(_vproc_post_fork_ping())) {
			_exit(EXIT_FAILURE);
		}

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		// wait for our parent to say they've attached a kevent to us
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			// Advertise our end of the IPC socket through the environment.
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		// Parent side: record the start and clear all per-run flags.
		j->start_time = runtime_get_opaque_time();

		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->did_exec = false;
		j->fpfail = false;
		j->jettisoned = false;
		j->xpcproxy_did_exec = false;
		j->checkedin = false;
		j->start_pending = false;
		j->reaped = false;
		j->crashed = false;
		j->stopped = false;
		j->workaround9359725 = false;
		j->implicit_reap = false;
		j->unthrottle = false;
		if (j->needs_kickoff) {
			j->needs_kickoff = false;

			// A kicked-off job with no semaphores becomes always-on.
			if (SLIST_EMPTY(&j->semaphores)) {
				j->ondemand = false;
			}
		}

		if (j->has_console) {
			// Remember the console owner's PID (cleared in job_reap()).
			launchd_wsp = c;
		}

		job_log(j, LOG_PERF, "Job started.");
		runtime_add_ref();
		total_children++;
		// Insert into the PID hash tables job_reap() later removes from.
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
		LIST_INSERT_HEAD(&managed_actives[ACTIVE_JOB_HASH(c)], j, global_pid_hash_sle);
		j->p = c;

		struct proc_uniqidentifierinfo info;
		if (proc_pidinfo(c, PROC_PIDUNIQIDENTIFIERINFO, 0, &info, PROC_PIDUNIQIDENTIFIERINFO_SIZE) != 0) {
			// ignore errors here, kevent_mod below will catch them and clean up
			j->uniqueid = info.p_uniqueid;
		}

		j->mgr->normal_active_cnt++;
		j->fork_fd = _fd(execspair[0]);
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[1]));
			ipc_open(_fd(spair[0]), j);
		}
		if (kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1) {
			job_ignore(j);
		} else {
			if (errno == ESRCH) {
				job_log(j, LOG_ERR, "Child was killed before we could attach a kevent.");
			} else {
				// Logs an assumes-failure for any errno other than ESRCH.
				(void)job_assumes(j, errno == ESRCH);
			}
			job_reap(j);

			/* If we have reaped this job within this same run loop pass, then
			 * it will be currently ignored. So if there's a failure to attach a
			 * kevent, we need to make sure that we watch the job so that we can
			 * respawn it.
			 *
			 * See <rdar://problem/10140809>.
			 */
			job_watch(j);
		}

#if HAVE_SYSTEMSTATS
		if (systemstats_is_enabled()) {
			/* We don't really *need* to make the full rusage call -- it
			 * will be mostly 0s and very small numbers. We only need
			 * ri_proc_start_abstime, because that's how we disambiguiate
			 * PIDs when they wrap around; and the UUID.
			 * In the future we should use the 64-bit process unique ID,
			 * so there's nothing to disambiguiate, and skip the full
			 * rusage call here.
			 *
			 * Well, the future is now.
			 */
			if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_LAUNCHD_SHOULD_LOG_JOB_START)) {
				job_log_perf_statistics(j, NULL, -3);
			}
		}
#endif
		j->wait4debugger_oneshot = false;
		// Let the child proceed to exec unless it asked to be stalled.
		if (likely(!j->stall_before_exec)) {
			job_uncork_fork(j);
		}
		break;
	}
}
 4550  
 4551  void
 4552  job_start_child(job_t j)
 4553  {
 4554  	typeof(posix_spawn) *psf;
 4555  	const char *file2exec = "/usr/libexec/launchproxy";
 4556  	const char **argv;
 4557  	posix_spawnattr_t spattr;
 4558  	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
 4559  	glob_t g;
 4560  	short spflags = POSIX_SPAWN_SETEXEC;
 4561  	int psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
 4562  	size_t binpref_out_cnt = 0;
 4563  	size_t i;
 4564  
 4565  	(void)job_assumes_zero(j, posix_spawnattr_init(&spattr));
 4566  
 4567  	job_setup_attributes(j);
 4568  
 4569  	bool use_xpcproxy = false;
 4570  	struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
 4571  	if (w4a) {
 4572  		(void)setenv(XPC_SERVICE_ENV_ATTACHED, "1", 1);
 4573  		if (!j->xpc_service) {
 4574  			use_xpcproxy = true;
 4575  		}
 4576  	}
 4577  
 4578  	if (use_xpcproxy) {
 4579  		argv = alloca(3 * sizeof(char *));
 4580  		argv[0] = "/usr/libexec/xpcproxy";
 4581  		argv[1] = "-debug";
 4582  		argv[2] = NULL;
 4583  
 4584  		file2exec = argv[0];
 4585  	} else if (unlikely(j->argv && j->globargv)) {
 4586  		g.gl_offs = 1;
 4587  		for (i = 0; i < j->argc; i++) {
 4588  			if (i > 0) {
 4589  				gflags |= GLOB_APPEND;
 4590  			}
 4591  			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
 4592  				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
 4593  				exit(EXIT_FAILURE);
 4594  			}
 4595  		}
 4596  		g.gl_pathv[0] = (char *)file2exec;
 4597  		argv = (const char **)g.gl_pathv;
 4598  	} else if (likely(j->argv)) {
 4599  		argv = alloca((j->argc + 2) * sizeof(char *));
 4600  		argv[0] = file2exec;
 4601  		for (i = 0; i < j->argc; i++) {
 4602  			argv[i + 1] = j->argv[i];
 4603  		}
 4604  		argv[i + 1] = NULL;
 4605  	} else {
 4606  		argv = alloca(3 * sizeof(char *));
 4607  		argv[0] = file2exec;
 4608  		argv[1] = j->prog;
 4609  		argv[2] = NULL;
 4610  	}
 4611  
 4612  	if (likely(!(j->inetcompat || use_xpcproxy))) {
 4613  		argv++;
 4614  	}
 4615  
 4616  	if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
 4617  		if (!j->app) {
 4618  			job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
 4619  		}
 4620  		spflags |= POSIX_SPAWN_START_SUSPENDED;
 4621  	}
 4622  
 4623  #if !TARGET_OS_EMBEDDED
 4624  	if (unlikely(j->disable_aslr)) {
 4625  		spflags |= _POSIX_SPAWN_DISABLE_ASLR;
 4626  	}
 4627  #endif
 4628  	spflags |= j->pstype;
 4629  
 4630  	(void)job_assumes_zero(j, posix_spawnattr_setflags(&spattr, spflags));
 4631  	if (unlikely(j->j_binpref_cnt)) {
 4632  		(void)job_assumes_zero(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt));
 4633  		(void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
 4634  	}
 4635  
 4636  	psproctype = j->psproctype;
 4637  	(void)job_assumes_zero(j, posix_spawnattr_setprocesstype_np(&spattr, psproctype));
 4638  
 4639  #if TARGET_OS_EMBEDDED
 4640  	/* Set jetsam attributes. POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY guards
 4641  	 * against a race which arises if, during spawn, an initial jetsam property
 4642  	 * update occurs before the values below are applied. In this case, the flag
 4643  	 * ensures that the subsequent change is ignored; the explicit update should
 4644  	 * be given priority.
 4645  	 */
 4646  	(void)job_assumes_zero(j, posix_spawnattr_setjetsam(&spattr,
 4647  	        POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY | (j->jetsam_memory_limit_background ? POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND : 0), 
 4648  	        j->jetsam_priority, j->jetsam_memlimit));
 4649  #endif
 4650  
 4651  	mach_port_array_t sports = NULL;
 4652  	mach_msg_type_number_t sports_cnt = 0;
 4653  	kern_return_t kr = vproc_mig_get_listener_port_rights(bootstrap_port, &sports, &sports_cnt);
 4654  	if (kr == 0 && sports_cnt) {
 4655  		/* For some reason, this SPI takes a count as a signed quantity. */
 4656  		(void)posix_spawnattr_set_importancewatch_port_np(&spattr, (int)sports_cnt, sports);
 4657  
 4658  		/* All "count" parameters in MIG are counts of the array. So an array of
 4659  		 * mach_port_t containing 10 elements will have a count of ten, but it
 4660  		 * will occupy 40 bytes. So we must do the multiplication here to pass
 4661  		 * the correct size.
 4662  		 *
 4663  		 * Note that we do NOT release the send rights. We need them to be valid
 4664  		 * at the time they are passed to posix_spawn(2). When we exec(3) using
 4665  		 * posix_spawn(2), they'll be cleaned up anyway.
 4666  		 */
 4667  		mig_deallocate((vm_address_t)sports, sports_cnt * sizeof(sports[0]));
 4668  	} else if (kr != BOOTSTRAP_UNKNOWN_SERVICE) {
 4669  		(void)job_assumes_zero(j, kr);
 4670  	}
 4671  
 4672  #if TARGET_OS_EMBEDDED
 4673  	if (!j->app || j->system_app) {
 4674  		(void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
 4675  	}
 4676  #else
 4677  	(void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
 4678  #endif
 4679  
 4680  #if !TARGET_OS_EMBEDDED
 4681  	struct task_qos_policy qosinfo = {
 4682  		.task_latency_qos_tier = LATENCY_QOS_LAUNCH_DEFAULT_TIER,
 4683  		.task_throughput_qos_tier = THROUGHPUT_QOS_LAUNCH_DEFAULT_TIER,
 4684  	};
 4685  
 4686  	if (!j->legacy_timers) {
 4687  		kr = task_policy_set(mach_task_self(), TASK_BASE_QOS_POLICY, (task_policy_t)&qosinfo, TASK_QOS_POLICY_COUNT);
 4688  		(void)job_assumes_zero_p(j, kr);
 4689  	}
 4690  #endif
 4691  
 4692  #if HAVE_RESPONSIBILITY
 4693  	/* Specify which process is responsible for the new job.  Per-app XPC
 4694  	 * services are the responsibility of the app.  Other processes are
 4695  	 * responsible for themselves.  This decision is final and also applies
 4696  	 * to the process's children, so don't initialize responsibility when
 4697  	 * starting a per-user launchd.
 4698  	 */
 4699  	if (j->mgr->req_pid) {
 4700  		responsibility_init2(j->mgr->req_pid, NULL);
 4701  	} else if (!j->per_user) {
 4702  		responsibility_init2(getpid(), j->prog ? j->prog : j->argv[0]);
 4703  	}
 4704  #endif
 4705  
 4706  #if HAVE_QUARANTINE
 4707  	if (j->quarantine_data) {
 4708  		qtn_proc_t qp;
 4709  
 4710  		if (job_assumes(j, qp = qtn_proc_alloc())) {
 4711  			if (job_assumes_zero(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
 4712  				(void)job_assumes_zero(j, qtn_proc_apply_to_self(qp));
 4713  			}
 4714  		}
 4715  	}
 4716  #endif
 4717  
 4718  #if HAVE_SANDBOX
 4719  #if TARGET_OS_EMBEDDED
 4720  	struct sandbox_spawnattrs sbattrs;
 4721  	if (j->seatbelt_profile || j->container_identifier) {
 4722  		sandbox_spawnattrs_init(&sbattrs);
 4723  		if (j->seatbelt_profile) {
 4724  			sandbox_spawnattrs_setprofilename(&sbattrs, j->seatbelt_profile);
 4725  		}
 4726  		if (j->container_identifier) {
 4727  			sandbox_spawnattrs_setcontainer(&sbattrs, j->container_identifier);
 4728  		}
 4729  		(void)job_assumes_zero(j, posix_spawnattr_setmacpolicyinfo_np(&spattr, "Sandbox", &sbattrs, sizeof(sbattrs)));
 4730  	}
 4731  #else
 4732  	if (j->seatbelt_profile) {
 4733  		char *seatbelt_err_buf = NULL;
 4734  
 4735  		if (job_assumes_zero_p(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf)) == -1) {
 4736  			if (seatbelt_err_buf) {
 4737  				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
 4738  			}
 4739  			goto out_bad;
 4740  		}
 4741  	}
 4742  #endif
 4743  #endif
 4744  
 4745  	psf = j->prog ? posix_spawn : posix_spawnp;
 4746  
 4747  	if (likely(!(j->inetcompat || use_xpcproxy))) {
 4748  		file2exec = j->prog ? j->prog : argv[0];
 4749  	}
 4750  
 4751  	errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);
 4752  
 4753  #if HAVE_SANDBOX && !TARGET_OS_EMBEDDED
 4754  out_bad:
 4755  #endif
 4756  	_exit(errno);
 4757  }
 4758  
 4759  void
 4760  jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
 4761  {
 4762  	launch_data_t tmp;
 4763  	struct envitem *ei;
 4764  	job_t ji;
 4765  
 4766  	if (jm->parentmgr) {
 4767  		jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
 4768  	} else {
 4769  		char **tmpenviron = environ;
 4770  		for (; *tmpenviron; tmpenviron++) {
 4771  			char envkey[1024];
 4772  			launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
 4773  			launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
 4774  			strncpy(envkey, *tmpenviron, sizeof(envkey));
 4775  			*(strchr(envkey, '=')) = '\0';
 4776  			launch_data_dict_insert(dict, s, envkey);
 4777  		}
 4778  	}
 4779  
 4780  	LIST_FOREACH(ji, &jm->jobs, sle) {
 4781  		SLIST_FOREACH(ei, &ji->global_env, sle) {
 4782  			if ((tmp = launch_data_new_string(ei->value))) {
 4783  				launch_data_dict_insert(dict, tmp, ei->key);
 4784  			}
 4785  		}
 4786  	}
 4787  }
 4788  
 4789  void
 4790  jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
 4791  {
 4792  	struct envitem *ei;
 4793  	job_t ji;
 4794  
 4795  	if (jm->parentmgr) {
 4796  		jobmgr_setup_env_from_other_jobs(jm->parentmgr);
 4797  	}
 4798  
 4799  	LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
 4800  		SLIST_FOREACH(ei, &ji->global_env, sle) {
 4801  			setenv(ei->key, ei->value, 1);
 4802  		}
 4803  	}
 4804  }
 4805  
/* Diagnostic aid, active only on Apple-internal builds: scan every PID on
 * the system and log the ones whose real, effective or saved UID matches
 * j->mach_uid — i.e. processes running under a UID with no backing account.
 */
void
job_log_pids_with_weird_uids(job_t j)
{
	// Buffer sized for the kernel's maximum process count.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	pid_t *pids = NULL;
	uid_t u = j->mach_uid;
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	pids = malloc(len);
	if (!job_assumes(j, pids != NULL)) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);

	/* libproc actually has some serious performance drawbacks when used over sysctl(3) in
	 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
	 * one kernel call, libproc requires that we get a list of PIDs we're interested in
	 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
	 * struct back in a single call for each one.
	 *
	 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
	 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
	 * libproc could go stale before we call proc_pidinfo().
	 *
	 * Note that proc_list*() APIs return the number of PIDs given back, not the number
	 * of bytes written to the buffer.
	 */
	if (job_assumes_zero_p(j, (kp_cnt = proc_listallpids(pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		/* We perhaps should not log a bug here if we get ESRCH back, due to the race
		 * detailed above.
		 */
		// proc_pidinfo() returns 0 on failure; the cause is left in errno.
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		// Cache the fields we compare so the log line below is consistent.
		uid_t i_euid = proc.pbsi_uid;
		uid_t i_uid = proc.pbsi_ruid;
		uid_t i_svuid = proc.pbsi_svuid;
		pid_t i_pid = pids[i];

		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);

// Temporarily disabled due to 5423935 and 4946119.
#if 0
		// Ask the accountless process to exit.
		(void)job_assumes_zero_p(j, kill2(i_pid, SIGTERM));
#endif
	}

out:
	free(pids);
}
 4875  
 4876  static struct passwd *
 4877  job_getpwnam(job_t j, const char *name)
 4878  {
 4879  	/*
 4880  	 * methodology for system daemons
 4881  	 *
 4882  	 * first lookup user record without any opendirectoryd interaction,
 4883  	 * we don't know what interprocess dependencies might be in flight.
 4884  	 * if that fails, we re-enable opendirectoryd interaction and 
 4885  	 * re-issue the lookup.  We have to disable the libinfo L1 cache
 4886  	 * otherwise libinfo will return the negative cache entry on the retry
 4887  	 */
 4888  #if !TARGET_OS_EMBEDDED
 4889  	struct passwd *pw = NULL;
 4890  
 4891  	if (pid1_magic && j->mgr == root_jobmgr) {
 4892  		// 1 == SEARCH_MODULE_FLAG_DISABLED
 4893  		si_search_module_set_flags("ds", 1);
 4894  		gL1CacheEnabled = false;
 4895  
 4896  		pw = getpwnam(name);
 4897  		si_search_module_set_flags("ds", 0);
 4898  	}
 4899  
 4900  	if (pw == NULL) {
 4901  		pw = getpwnam(name);
 4902  	}
 4903  
 4904  	return pw;
 4905  #else
 4906  #pragma unused (j)
 4907  	return getpwnam(name);
 4908  #endif
 4909  }
 4910  
 4911  static struct group *
 4912  job_getgrnam(job_t j, const char *name)
 4913  {
 4914  #if !TARGET_OS_EMBEDDED
 4915      struct group *gr = NULL;
 4916  
 4917      if (pid1_magic && j->mgr == root_jobmgr) {
 4918          si_search_module_set_flags("ds", 1);
 4919          gL1CacheEnabled = false;
 4920  
 4921          gr = getgrnam(name);
 4922  
 4923          si_search_module_set_flags("ds", 0);
 4924      }
 4925  
 4926      if (gr == NULL) {
 4927          gr = getgrnam(name);
 4928      }
 4929  
 4930      return gr;
 4931  #else
 4932  #pragma unused (j)
 4933      return getgrnam(name);
 4934  #endif
 4935  }
 4936  
 4937  void
 4938  job_postfork_test_user(job_t j)
 4939  {
 4940  	// This function is all about 5201578
 4941  
 4942  	const char *home_env_var = getenv("HOME");
 4943  	const char *user_env_var = getenv("USER");
 4944  	const char *logname_env_var = getenv("LOGNAME");
 4945  	uid_t tmp_uid, local_uid = getuid();
 4946  	gid_t tmp_gid, local_gid = getgid();
 4947  	char shellpath[PATH_MAX];
 4948  	char homedir[PATH_MAX];
 4949  	char loginname[2000];
 4950  	struct passwd *pwe;
 4951  
 4952  
 4953  	if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
 4954  				&& strcmp(user_env_var, logname_env_var) == 0)) {
 4955  		goto out_bad;
 4956  	}
 4957  
 4958  	if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
 4959  		job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
 4960  		goto out_bad;
 4961  	}
 4962  
 4963  	/*
 4964  	 * We must copy the results of getpw*().
 4965  	 *
 4966  	 * Why? Because subsequent API calls may call getpw*() as a part of
 4967  	 * their implementation. Since getpw*() returns a [now thread scoped]
 4968  	 * global, we must therefore cache the results before continuing.
 4969  	 */
 4970  
 4971  	tmp_uid = pwe->pw_uid;
 4972  	tmp_gid = pwe->pw_gid;
 4973  
 4974  	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
 4975  	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
 4976  	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));
 4977  
 4978  	if (strcmp(loginname, logname_env_var) != 0) {
 4979  		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
 4980  		goto out_bad;
 4981  	}
 4982  	if (strcmp(homedir, home_env_var) != 0) {
 4983  		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
 4984  		goto out_bad;
 4985  	}
 4986  	if (local_uid != tmp_uid) {
 4987  		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
 4988  				'U', tmp_uid, local_uid);
 4989  		goto out_bad;
 4990  	}
 4991  	if (local_gid != tmp_gid) {
 4992  		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
 4993  				'G', tmp_gid, local_gid);
 4994  		goto out_bad;
 4995  	}
 4996  
 4997  	return;
 4998  out_bad:
 4999  #if 0
 5000  	(void)job_assumes_zero_p(j, kill2(getppid(), SIGTERM));
 5001  	_exit(EXIT_FAILURE);
 5002  #else
 5003  	job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
 5004  #endif
 5005  }
 5006  
/* Drops privileges in the forked child to the job's configured user/group.
 * Runs only when we are root; otherwise delegates to the consistency check
 * in job_postfork_test_user(). The ordering setlogin -> setgid ->
 * initgroups -> setuid is deliberate (see the inline comments); any failure
 * terminates the child with _exit(2).
 */
void
job_postfork_become_user(job_t j)
{
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	if (getuid() != 0) {
		return job_postfork_test_user(j);
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	// Resolve the target account: by name, or by the MachServices UID.
	if (j->username) {
		if ((pwe = job_getpwnam(j, j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(ESRCH);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_log_pids_with_weird_uids(j);
			_exit(ESRCH);
		}
	} else {
		// No user configured: stay root.
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	// A pw_expire of 0 means "no expiry".
	if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	// An explicit GroupName overrides the account's primary group.
	if (j->groupname) {
		struct group *gre;

		if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(ESRCH);
		}

		desired_gid = gre->gr_gid;
	}

	if (job_assumes_zero_p(j, setlogin(loginname)) == -1) {
		_exit(EXIT_FAILURE);
	}

	if (job_assumes_zero_p(j, setgid(desired_gid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (likely(!j->no_init_groups)) {
#if 1
		if (job_assumes_zero_p(j, initgroups(loginname, desired_gid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#else
		/* Do our own little initgroups(). We do this to guarantee that we're
		 * always opted into dynamic group resolution in the kernel. initgroups(3)
		 * does not make this guarantee.
		 */
		int groups[NGROUPS], ngroups;

		// A failure here isn't fatal, and we'll still get data we can use.
		(void)job_assumes_zero_p(j, getgrouplist(j->username, desired_gid, groups, &ngroups));

		if (job_assumes_zero_p(j, syscall(SYS_initgroups, ngroups, groups, desired_uid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#endif
	}

	// setuid() last: after this we can no longer change identity.
	if (job_assumes_zero_p(j, setuid(desired_uid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (likely(r > 0 && r < sizeof(tmpdirpath))) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	// overwrite == 0: do not clobber values the job explicitly configured.
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
 5133  
 5134  void
 5135  job_setup_attributes(job_t j)
 5136  {
 5137  	struct limititem *li;
 5138  	struct envitem *ei;
 5139  
 5140  	if (unlikely(j->setnice)) {
 5141  		(void)job_assumes_zero_p(j, setpriority(PRIO_PROCESS, 0, j->nice));
 5142  	}
 5143  
 5144  	SLIST_FOREACH(li, &j->limits, sle) {
 5145  		struct rlimit rl;
 5146  
 5147  		if (job_assumes_zero_p(j, getrlimit(li->which, &rl) == -1)) {
 5148  			continue;
 5149  		}
 5150  
 5151  		if (li->sethard) {
 5152  			rl.rlim_max = li->lim.rlim_max;
 5153  		}
 5154  		if (li->setsoft) {
 5155  			rl.rlim_cur = li->lim.rlim_cur;
 5156  		}
 5157  
 5158  		if (setrlimit(li->which, &rl) == -1) {
 5159  			job_log_error(j, LOG_WARNING, "setrlimit()");
 5160  		}
 5161  	}
 5162  
 5163  	if (unlikely(!j->inetcompat && j->session_create)) {
 5164  		launchd_SessionCreate();
 5165  	}
 5166  
 5167  	if (unlikely(j->low_pri_io)) {
 5168  		(void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE));
 5169  	}
 5170  	if (j->low_priority_background_io) {
 5171  		(void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_DARWIN_BG, IOPOL_THROTTLE));
 5172  	}
 5173  	if (unlikely(j->rootdir)) {
 5174  		(void)job_assumes_zero_p(j, chroot(j->rootdir));
 5175  		(void)job_assumes_zero_p(j, chdir("."));
 5176  	}
 5177  
 5178  	job_postfork_become_user(j);
 5179  
 5180  	if (unlikely(j->workingdir)) {
 5181  		if (chdir(j->workingdir) == -1) {
 5182  			if (errno == ENOENT || errno == ENOTDIR) {
 5183  				job_log(j, LOG_ERR, "Job specified non-existent working directory: %s", j->workingdir);
 5184  			} else {
 5185  				(void)job_assumes_zero(j, errno);
 5186  			}
 5187  		}
 5188  	}
 5189  
 5190  	if (unlikely(j->setmask)) {
 5191  		umask(j->mask);
 5192  	}
 5193  
 5194  	if (j->stdin_fd) {
 5195  		(void)job_assumes_zero_p(j, dup2(j->stdin_fd, STDIN_FILENO));
 5196  	} else {
 5197  		job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
 5198  	}
 5199  	job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
 5200  	job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
 5201  
 5202  	jobmgr_setup_env_from_other_jobs(j->mgr);
 5203  
 5204  	SLIST_FOREACH(ei, &j->env, sle) {
 5205  		setenv(ei->key, ei->value, 1);
 5206  	}
 5207  
 5208  #if !TARGET_OS_EMBEDDED	
 5209  	if (j->jetsam_properties) {
 5210  		(void)job_assumes_zero(j, proc_setpcontrol(PROC_SETPC_TERMINATE));
 5211  	}
 5212  #endif
 5213  
 5214  #if TARGET_OS_EMBEDDED
 5215  	if (j->main_thread_priority != 0) {
 5216  		struct sched_param params;
 5217  		bzero(&params, sizeof(params));
 5218  		params.sched_priority = j->main_thread_priority;
 5219  		(void)job_assumes_zero_p(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params));
 5220  	}
 5221  #endif
 5222  
 5223  	/*
 5224  	 * We'd like to call setsid() unconditionally, but we have reason to
 5225  	 * believe that prevents launchd from being able to send signals to
 5226  	 * setuid children. We'll settle for process-groups.
 5227  	 */
 5228  	if (getppid() != 1) {
 5229  		(void)job_assumes_zero_p(j, setpgid(0, 0));
 5230  	} else {
 5231  		(void)job_assumes_zero_p(j, setsid());
 5232  	}
 5233  }
 5234  
 5235  void
 5236  job_setup_fd(job_t j, int target_fd, const char *path, int flags)
 5237  {
 5238  	int fd;
 5239  
 5240  	if (!path) {
 5241  		return;
 5242  	}
 5243  
 5244  	if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
 5245  		job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
 5246  		return;
 5247  	}
 5248  
 5249  	(void)job_assumes_zero_p(j, dup2(fd, target_fd));
 5250  	(void)job_assumes_zero(j, runtime_close(fd));
 5251  }
 5252  
/* Computes the next absolute fire time for calendar interval 'ci', inserts
 * it into the global 'sorted_calendar_events' list (kept ordered by
 * when_next), and re-arms the single shared EVFILT_TIMER kevent to the
 * earliest pending event.
 */
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	// Next time matching the month/day/hour/minute specification.
	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	if (ci->when.tm_wday != -1) {
		// A weekday was also specified; compute its next occurrence.
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		if (ci->when.tm_mday == -1) {
			// Only the weekday constrains the date.
			later = otherlater;
		} else {
			// Both constraints given: fire at whichever comes first.
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	// Sorted insert; ci_prev tracks the tail in case we must append.
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		// ci must want to fire after every other timer, or there are no timers

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	// One shared absolute-seconds kevent is keyed on the list head itself.
	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	if (job_assumes_zero_p(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr)) != -1) {
		char time_string[100];
		size_t time_string_len;

		// ctime_r() output ends with '\n'; strip it for the log line.
		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
 5308  
 5309  bool
 5310  jobmgr_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
 5311  {
 5312  	jobmgr_t jm = ctx;
 5313  	jobmgr_log(jm, LOG_ERR, "%s", message);
 5314  
 5315  	return true;
 5316  }
 5317  
 5318  bool
 5319  job_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
 5320  {
 5321  	job_t j = ctx;
 5322  	job_log(j, LOG_ERR, "%s", message);
 5323  
 5324  	return true;
 5325  }
 5326  
 5327  // ri: NULL = please sample j->p; non-NULL = use this sample
 5328  void
 5329  job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status)
 5330  {
 5331  #if HAVE_SYSTEMSTATS
 5332  	if (j->anonymous || !j->p) {
 5333  		return;
 5334  	}
 5335  	if (!systemstats_is_enabled()) {
 5336  		return;
 5337  	}
 5338  	const char *name;
 5339  	if (j->cfbundleidentifier) {
 5340  		name = j->cfbundleidentifier;
 5341  	} else {
 5342  		name = j->label;
 5343  	}
 5344  	int r = 0;
 5345  	struct rusage_info_v1 ris;
 5346  	if (ri == NULL) {
 5347  		ri = &ris;
 5348  		r = proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)ri);
 5349  	}
 5350  	if (r == -1) {
 5351  		return;
 5352  	}
 5353  	job_log_systemstats(j->p, j->uniqueid, runtime_get_uniqueid(), j->mgr->req_pid, j->mgr->req_uniqueid, name, ri, exit_status);
 5354  #else
 5355  #pragma unused (j, ri, exit_status)
 5356  #endif
 5357  }
 5358  
#if HAVE_SYSTEMSTATS
// ri: NULL = don't write fields from ri; non-NULL = use this sample
/* Packs the identity fields and (optionally) the rusage sample into a
 * systemstats_process_usage_s record and submits it via
 * systemstats_write_process_usage(). Fields gated by SYSTEMSTATS_API_VERSION
 * are only populated when the headers we built against provide them.
 */
static
void
job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status)
{
	if (!systemstats_is_enabled()) {
		return;
	}

	struct systemstats_process_usage_s info;
	bzero(&info, sizeof(info));
	info.name = name;
	info.pid = pid;
	info.exit_status = exit_status;
	info.uid = getuid();
	info.ppid = getpid();
	info.responsible_pid = req_pid;

	if (likely(ri)) {
		// Copy the rusage sample field-for-field into the record.
		info.macho_uuid = (const uint8_t *)&ri->ri_uuid;
		info.user_time = ri->ri_user_time;
		info.system_time = ri->ri_system_time;
		info.pkg_idle_wkups = ri->ri_pkg_idle_wkups;
		info.interrupt_wkups = ri->ri_interrupt_wkups;
		info.proc_start_abstime = ri->ri_proc_start_abstime;
		info.proc_exit_abstime = ri->ri_proc_exit_abstime;
#if SYSTEMSTATS_API_VERSION >= 20130319
		info.pageins = ri->ri_pageins;
		info.wired_size = ri->ri_wired_size;
		info.resident_size = ri->ri_resident_size;
		info.phys_footprint = ri->ri_phys_footprint;
		// info.purgeablesize = ???
#endif
#if SYSTEMSTATS_API_VERSION >= 20130328
		info.child_user_time = ri->ri_child_user_time;
		info.child_system_time = ri->ri_child_system_time;
		info.child_pkg_idle_wkups = ri->ri_child_pkg_idle_wkups;
		info.child_interrupt_wkups = ri->ri_child_interrupt_wkups;
		info.child_pageins = ri->ri_child_pageins;
		info.child_elapsed_abstime = ri->ri_child_elapsed_abstime;
#endif
	}
#if SYSTEMSTATS_API_VERSION >= 20130410
	info.uniqueid = uniqueid;
	info.parent_uniqueid = parent_uniqueid;
	info.responsible_uniqueid = req_uniqueid;
#endif
	systemstats_write_process_usage(&info);
}
#endif /* HAVE_SYSTEMSTATS */
 5410  
 5411  struct waiting4attach *
 5412  waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type)
 5413  {
 5414  	size_t xtra = strlen(name) + 1;
 5415  
 5416  	struct waiting4attach *w4a = malloc(sizeof(*w4a) + xtra);
 5417  	if (!w4a) {
 5418  		return NULL;
 5419  	}
 5420  
 5421  	w4a->port = port;
 5422  	w4a->dest = dest;
 5423  	w4a->type = type;
 5424  	(void)strcpy(w4a->name, name);
 5425  
 5426  	if (dest) {
 5427  		LIST_INSERT_HEAD(&_launchd_domain_waiters, w4a, le);
 5428  	} else {
 5429  		LIST_INSERT_HEAD(&jm->attaches, w4a, le);
 5430  	}
 5431  
 5432  
 5433  	(void)jobmgr_assumes_zero(jm, launchd_mport_notify_req(port, MACH_NOTIFY_DEAD_NAME));
 5434  	return w4a;
 5435  }
 5436  
 5437  void
 5438  waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a)
 5439  {
 5440  	jobmgr_log(jm, LOG_DEBUG, "Canceling dead-name notification for waiter port: 0x%x", w4a->port);
 5441  
 5442  	LIST_REMOVE(w4a, le);
 5443  
 5444  	mach_port_t previous = MACH_PORT_NULL;
 5445  	(void)jobmgr_assumes_zero(jm, mach_port_request_notification(mach_task_self(), w4a->port, MACH_NOTIFY_DEAD_NAME, 0, MACH_PORT_NULL, MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous));
 5446  	if (previous) {
 5447  		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(previous));
 5448  	}
 5449  
 5450  	jobmgr_assumes_zero(jm, launchd_mport_deallocate(w4a->port));
 5451  	free(w4a);
 5452  }
 5453  
 5454  struct waiting4attach *
 5455  waiting4attach_find(jobmgr_t jm, job_t j)
 5456  {
 5457  	char *name2use = (char *)j->label;
 5458  	if (j->app) {
 5459  		struct envitem *ei = NULL;
 5460  		SLIST_FOREACH(ei, &j->env, sle) {
 5461  			if (strcmp(ei->key, XPC_SERVICE_RENDEZVOUS_TOKEN) == 0) {
 5462  				name2use = ei->value;
 5463  				break;
 5464  			}
 5465  		}
 5466  	}
 5467  
 5468  	struct waiting4attach *w4ai = NULL;
 5469  	LIST_FOREACH(w4ai, &jm->attaches, le) {
 5470  		if (strcmp(name2use, w4ai->name) == 0) {
 5471  			job_log(j, LOG_DEBUG, "Found attachment: %s", name2use);
 5472  			break;
 5473  		}
 5474  	}
 5475  
 5476  	return w4ai;
 5477  }
 5478  
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	/* Core job logging routine: formats the message (optionally appending
	 * an errno-style error) and emits it through launchd's syslog shim with
	 * attribution for the job and its manager. A NULL job is tolerated and
	 * logged under placeholder names. `err` is an errno value (0 = none);
	 * `msg` is a printf-style format consumed with `ap`.
	 */
	const char *label2use = j ? j->label : "com.apple.launchd.job-unknown";
	const char *mgr2use = j ? j->mgr->name : "com.apple.launchd.jobmanager-unknown";
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	struct launchd_syslog_attr attr = {
		.from_name = launchd_label,
		.about_name = label2use,
		.session_name = mgr2use,
		.priority = pri,
		.from_uid = getuid(),
		.from_pid = getpid(),
		.about_pid = j ? j->p : 0,
	};

	/* Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(2), but before the exec*(3). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	// 200 bytes of slack covers the label prefix and strerror() text below.
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s: %d: %s", msg, err, strerror(err));
#else
		snprintf(newmsg, newmsgsz, "(%s) %s: %d: %s", label2use, msg, err, strerror(err));
#endif
	} else {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s", msg);
#else
		snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
#endif
	}

	// Jobs marked for debugging get their messages past the default log mask.
	if (j && unlikely(j->debug)) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	launchd_vsyslog(&attr, newmsg, ap);

	// Restore the caller's mask once the debug-level message is out.
	if (j && unlikely(j->debug)) {
		setlogmask(oldmask);
	}
}
 5533  
void
job_log_error(job_t j, int pri, const char *msg, ...)
{
	// Variadic convenience wrapper: logs `msg` for job `j` with the current
	// errno appended (via job_logv's err parameter).
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, errno, msg, ap);
	va_end(ap);
}
 5543  
void
job_log(job_t j, int pri, const char *msg, ...)
{
	// Variadic convenience wrapper: logs `msg` for job `j` with no errno
	// (err = 0) appended.
	va_list ap;

	va_start(ap, msg);
	job_logv(j, pri, 0, msg, ap);
	va_end(ap);
}
 5553  
#if 0
// Compiled out: errno-appending wrapper kept for symmetry with
// job_log_error(); currently has no callers.
void
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, errno, msg, ap);
	va_end(ap);
}
#endif
 5565  
void
jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children)
{
	/* Recursively dump performance statistics for this job manager, its
	 * sub-managers, and every job they contain. When signal_children is
	 * true, per-user launchd instances are additionally sent SIGINFO so
	 * they log their own statistics.
	 */
#if HAVE_SYSTEMSTATS
	// Log information for kernel_task and pid 1 launchd.
	if (systemstats_is_enabled() && pid1_magic && jm == root_jobmgr) {
#if SYSTEMSTATS_API_VERSION >= 20130328
		if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_SHOULD_LOG_ENERGY_STATISTICS)) {
			systemstats_write_intel_energy_statistics(NULL);
		}
#else
		systemstats_write_intel_energy_statistics(NULL);
#endif
		job_log_systemstats(0, 0, 0, 0, 0, "com.apple.kernel", NULL, -1);
		job_log_systemstats(1, 1, 0, 1, 1, "com.apple.launchd", NULL, -1);
	}
#endif
	// Depth-first: log sub-managers before this manager's own job list.
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_perf_statistics(jmi, signal_children);
	}

	// Identify what kind of domain this manager represents.
	if (jm->xpc_singleton) {
		jobmgr_log(jm, LOG_PERF, "XPC Singleton Domain: %s", jm->shortdesc);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
		jobmgr_log(jm, LOG_PERF, "XPC Private Domain: %s", jm->owner);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) {
		jobmgr_log(jm, LOG_PERF, "Created via bootstrap_subset()");
	}

	jobmgr_log(jm, LOG_PERF, "Jobs in job manager:");

	job_t ji = NULL;
	LIST_FOREACH(ji, &jm->jobs, sle) {
		job_log_perf_statistics(ji, NULL, -1);
		// Per-user launchds are identified by their label prefix.
		if (unlikely(signal_children) && unlikely(strstr(ji->label, "com.apple.launchd.peruser.") == ji->label)) {
			jobmgr_log(jm, LOG_PERF, "Sending SIGINFO to peruser launchd %d", ji->p);
			kill(ji->p, SIGINFO);
		}
	}

	jobmgr_log(jm, LOG_PERF, "End of job list.");
}
 5609  
void
jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
{
	// Variadic convenience wrapper around jobmgr_logv() with no errno.
	va_list ap;

	va_start(ap, msg);
	jobmgr_logv(jm, pri, 0, msg, ap);
	va_end(ap);
}
 5619  
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	/* Log a message for a job manager, prefixing the manager's name and
	 * recursing up through parent managers so the full hierarchy appears in
	 * the prefix. A NULL manager defaults to the root manager.
	 */
	if (!jm) {
		jm = root_jobmgr;
	}

	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	// Worst case every character is '%' and must be doubled: 2x + NUL fits
	// in (len + 1) * 2 bytes.
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	/* Escape '%' in the manager name: newmsg becomes the *format string*
	 * for the recursive call / launchd_vsyslog below, so a literal '%'
	 * would otherwise be interpreted as a conversion specifier.
	 */
	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		// err was already folded into newmsg above, so pass 0 upward.
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct launchd_syslog_attr attr = {
			.from_name = launchd_label,
			.about_name = launchd_label,
			.session_name = jm->name,
			.priority = pri,
			.from_uid = getuid(),
			.from_pid = getpid(),
			.about_pid = getpid(),
		};

		launchd_vsyslog(&attr, newmsg, ap);
	}
}
 5666  
/* Context threaded through launch_data_dict_iterate() while parsing a
 * StartCalendarInterval dictionary: the job being configured and the
 * struct tm being populated. tm_sec == -1 is used as the error flag.
 */
struct cal_dict_walk {
	job_t j;
	struct tm tmptm;
};
 5671  
void
calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
{
	/* launch_data_dict_iterate() callback: validate one key/value pair of a
	 * StartCalendarInterval dictionary and store it into the struct tm in
	 * the walk context. Errors are signaled by setting tm_sec to -1, which
	 * the caller (calendarinterval_new_from_obj) checks after iteration.
	 */
	struct cal_dict_walk *cdw = context;
	struct tm *tmptm = &cdw->tmptm;
	job_t j = cdw->j;
	int64_t val;

	if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
		// hack to let caller know something went wrong
		tmptm->tm_sec = -1;
		return;
	}

	val = launch_data_get_integer(obj);

	// Negative values are warned about but otherwise ignored (no error flag).
	if (val < 0) {
		job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
		if (val > 59) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_min = (typeof(tmptm->tm_min)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
		if (val > 23) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
		if (val < 1 || val > 31) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
		if (val > 7) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
		if (val > 12) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
			// struct tm months are 0-based, the plist key is 1-based.
			tmptm->tm_mon -= 1; // 4798263 cron compatibility
		}
	}
}
 5728  
 5729  bool
 5730  calendarinterval_new_from_obj(job_t j, launch_data_t obj)
 5731  {
 5732  	struct cal_dict_walk cdw;
 5733  
 5734  	cdw.j = j;
 5735  	memset(&cdw.tmptm, 0, sizeof(0));
 5736  
 5737  	cdw.tmptm.tm_min = -1;
 5738  	cdw.tmptm.tm_hour = -1;
 5739  	cdw.tmptm.tm_mday = -1;
 5740  	cdw.tmptm.tm_wday = -1;
 5741  	cdw.tmptm.tm_mon = -1;
 5742  
 5743  	if (!job_assumes(j, obj != NULL)) {
 5744  		return false;
 5745  	}
 5746  
 5747  	if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
 5748  		return false;
 5749  	}
 5750  
 5751  	launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
 5752  
 5753  	if (unlikely(cdw.tmptm.tm_sec == -1)) {
 5754  		return false;
 5755  	}
 5756  
 5757  	return calendarinterval_new(j, &cdw.tmptm);
 5758  }
 5759  
 5760  bool
 5761  calendarinterval_new(job_t j, struct tm *w)
 5762  {
 5763  	struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
 5764  
 5765  	if (!job_assumes(j, ci != NULL)) {
 5766  		return false;
 5767  	}
 5768  
 5769  	ci->when = *w;
 5770  	ci->job = j;
 5771  
 5772  	SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
 5773  
 5774  	calendarinterval_setalarm(j, ci);
 5775  
 5776  	runtime_add_weak_ref();
 5777  
 5778  	return true;
 5779  }
 5780  
 5781  void
 5782  calendarinterval_delete(job_t j, struct calendarinterval *ci)
 5783  {
 5784  	SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
 5785  	LIST_REMOVE(ci, global_sle);
 5786  
 5787  	free(ci);
 5788  
 5789  	runtime_del_weak_ref();
 5790  }
 5791  
void
calendarinterval_sanity_check(void)
{
	/* The head of sorted_calendar_events is the soonest-firing interval; if
	 * it is already in the past, the timer machinery has fallen behind, so
	 * raise SIGUSR1 to trigger recovery.
	 */
	struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
	time_t now = time(NULL);

	if (unlikely(ci && (ci->when_next < now))) {
		(void)jobmgr_assumes_zero_p(root_jobmgr, raise(SIGUSR1));
	}
}
 5802  
void
calendarinterval_callback(void)
{
	/* Timer expiry handler: fire every calendar interval whose deadline has
	 * passed, re-arm each one for its next occurrence, and dispatch the
	 * owning job.
	 */
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		// The list is sorted by deadline, so stop at the first future entry.
		if (ci->when_next > now) {
			break;
		}

		// Remove and re-arm; setalarm re-inserts at the new sorted position.
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
 5823  
 5824  bool
 5825  socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt)
 5826  {
 5827  	struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
 5828  
 5829  	if (!job_assumes(j, sg != NULL)) {
 5830  		return false;
 5831  	}
 5832  
 5833  	sg->fds = calloc(1, fd_cnt * sizeof(int));
 5834  	sg->fd_cnt = fd_cnt;
 5835  
 5836  	if (!job_assumes(j, sg->fds != NULL)) {
 5837  		free(sg);
 5838  		return false;
 5839  	}
 5840  
 5841  	memcpy(sg->fds, fds, fd_cnt * sizeof(int));
 5842  	strcpy(sg->name_init, name);
 5843  
 5844  	SLIST_INSERT_HEAD(&j->sockets, sg, sle);
 5845  
 5846  	runtime_add_weak_ref();
 5847  
 5848  	return true;
 5849  }
 5850  
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	/* Close every fd in the group, unlink it from the job, free its
	 * storage, and drop the weak reference taken in socketgroup_new().
	 */
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		// Disabled: unlinking the socket path on teardown (see 5480306).
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		// 5480306
		if (job_assumes_zero(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			(void)job_assumes(j, unlink(sun->sun_path) != -1);
			// We might conditionally need to delete a directory here
		}
#endif
		(void)job_assumes_zero_p(j, runtime_close(sg->fds[i]));
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	runtime_del_weak_ref();
}
 5879  
 5880  void
 5881  socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
 5882  {
 5883  	struct kevent kev[sg->fd_cnt];
 5884  	char buf[10000];
 5885  	unsigned int i, buf_off = 0;
 5886  
 5887  	for (i = 0; i < sg->fd_cnt; i++) {
 5888  		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
 5889  		buf_off += snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
 5890  	}
 5891  
 5892  	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);
 5893  
 5894  	(void)job_assumes_zero_p(j, kevent_bulk_mod(kev, sg->fd_cnt));
 5895  
 5896  	for (i = 0; i < sg->fd_cnt; i++) {
 5897  		(void)job_assumes(j, kev[i].flags & EV_ERROR);
 5898  		errno = (typeof(errno)) kev[i].data;
 5899  		(void)job_assumes_zero(j, kev[i].data);
 5900  	}
 5901  }
 5902  
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	// Stop watching the group's sockets (EV_DELETE on each kevent).
	socketgroup_kevent_mod(j, sg, false);
}
 5908  
void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	// Begin watching the group's sockets for readability (EV_ADD).
	socketgroup_kevent_mod(j, sg, true);
}
 5914  
void
socketgroup_callback(job_t j)
{
	// A watched socket became readable: dispatch (launch) the job.
	job_dispatch(j, true);
}
 5920  
 5921  bool
 5922  envitem_new(job_t j, const char *k, const char *v, bool global)
 5923  {
 5924  	if (global && !launchd_allow_global_dyld_envvars) {
 5925  		if (strncmp("DYLD_", k, sizeof("DYLD_") - 1) == 0) {
 5926  			job_log(j, LOG_ERR, "Ignoring global environment variable submitted by job (variable=value): %s=%s", k, v);
 5927  			return false;
 5928  		}
 5929  	}
 5930  
 5931  	struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
 5932  
 5933  	if (!job_assumes(j, ei != NULL)) {
 5934  		return false;
 5935  	}
 5936  
 5937  	strcpy(ei->key_init, k);
 5938  	ei->value = ei->key_init + strlen(k) + 1;
 5939  	strcpy(ei->value, v);
 5940  
 5941  	if (global) {
 5942  		if (SLIST_EMPTY(&j->global_env)) {
 5943  			LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
 5944  		}
 5945  		SLIST_INSERT_HEAD(&j->global_env, ei, sle);
 5946  	} else {
 5947  		SLIST_INSERT_HEAD(&j->env, ei, sle);
 5948  	}
 5949  
 5950  	job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
 5951  
 5952  	return true;
 5953  }
 5954  
 5955  void
 5956  envitem_delete(job_t j, struct envitem *ei, bool global)
 5957  {
 5958  	if (global) {
 5959  		SLIST_REMOVE(&j->global_env, ei, envitem, sle);
 5960  		if (SLIST_EMPTY(&j->global_env)) {
 5961  			LIST_REMOVE(j, global_env_sle);
 5962  		}
 5963  	} else {
 5964  		SLIST_REMOVE(&j->env, ei, envitem, sle);
 5965  	}
 5966  
 5967  	free(ei);
 5968  }
 5969  
 5970  void
 5971  envitem_setup(launch_data_t obj, const char *key, void *context)
 5972  {
 5973  	job_t j = context;
 5974  
 5975  	if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
 5976  		return;
 5977  	}
 5978  
 5979  	if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
 5980  		envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
 5981  	} else {
 5982  		job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
 5983  	}
 5984  }
 5985  
 5986  bool
 5987  limititem_update(job_t j, int w, rlim_t r)
 5988  {
 5989  	struct limititem *li;
 5990  
 5991  	SLIST_FOREACH(li, &j->limits, sle) {
 5992  		if (li->which == w) {
 5993  			break;
 5994  		}
 5995  	}
 5996  
 5997  	if (li == NULL) {
 5998  		li = calloc(1, sizeof(struct limititem));
 5999  
 6000  		if (!job_assumes(j, li != NULL)) {
 6001  			return false;
 6002  		}
 6003  
 6004  		SLIST_INSERT_HEAD(&j->limits, li, sle);
 6005  
 6006  		li->which = w;
 6007  	}
 6008  
 6009  	if (j->importing_hard_limits) {
 6010  		li->lim.rlim_max = r;
 6011  		li->sethard = true;
 6012  	} else {
 6013  		li->lim.rlim_cur = r;
 6014  		li->setsoft = true;
 6015  	}
 6016  
 6017  	return true;
 6018  }
 6019  
void
limititem_delete(job_t j, struct limititem *li)
{
	// Unlink the resource-limit record from the job's list and free it.
	SLIST_REMOVE(&j->limits, li, limititem, sle);

	free(li);
}
 6027  
#if HAVE_SANDBOX
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	/* launch_data_dict_iterate() callback: accumulate sandbox flags for the
	 * job. Only boolean-true values are honored; everything else is either
	 * warned about (non-boolean) or ignored (false).
	 */
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	if (launch_data_get_bool(obj) == false) {
		return;
	}

	if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
 6048  
 6049  void
 6050  limititem_setup(launch_data_t obj, const char *key, void *context)
 6051  {
 6052  	job_t j = context;
 6053  	size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
 6054  	rlim_t rl;
 6055  
 6056  	if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
 6057  		return;
 6058  	}
 6059  
 6060  	rl = launch_data_get_integer(obj);
 6061  
 6062  	for (i = 0; i < limits_cnt; i++) {
 6063  		if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
 6064  			break;
 6065  		}
 6066  	}
 6067  
 6068  	if (i == limits_cnt) {
 6069  		return;
 6070  	}
 6071  
 6072  	limititem_update(j, launchd_keys2limits[i].val, rl);
 6073  }
 6074  
bool
job_useless(job_t j)
{
	/* Decide whether an exited job should be garbage-collected rather than
	 * kept around for relaunch. Returns true when the job has no reason to
	 * exist anymore.
	 */
	// One-shot jobs that have already run once are done...
	if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
		// ...unless a legacy LS job still holds its privileged port.
		if (j->legacy_LS_job && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->shutdown_monitor) {
		// The shutdown monitor must survive until the very end.
		return false;
	} else if (j->mgr->shutting_down && !j->mgr->parentmgr) {
		// Root manager is shutting down: everything that exits stays down.
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		if (total_children == 0 && !j->anonymous) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
		}
		return true;
	} else if (j->legacy_mach_job) {
		// Legacy Mach jobs are useless once serviceless or if they never
		// checked in.
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	} else {
		/* If the job's executable does not have any valid architectures (for
		 * example, if it's a PowerPC-only job), then we don't even bother
		 * trying to relaunch it, as we have no reasonable expectation that
		 * the situation will change.
		 *
		 * <rdar://problem/9106979>
		 */
		if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
			job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
			return true;
		}
	}

	return false;
}
 6119  
bool
job_keepalive(job_t j)
{
	/* Evaluate all KeepAlive criteria for an exited job and return true if
	 * any of them says the job should be (re)started now. Checked, in
	 * order: pending launches, always-run configuration, queued Mach
	 * messages, external events, and the semaphore conditions from the
	 * job's KeepAlive dictionary.
	 */
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
	bool is_not_kextd = (launchd_apple_internal || (strcmp(j->label, "com.apple.kextd") != 0));

	if (unlikely(j->mgr->shutting_down)) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
		return false;
	}

	if (unlikely(j->needs_kickoff)) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	// A queued message on any of the job's Mach services demands a restart.
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}

	/* TODO: Coalesce external events and semaphore items, since they're basically
	 * the same thing.
	 */
	struct externalevent *ei = NULL;
	LIST_FOREACH(ei, &j->events, job_le) {
		if (ei->state == ei->wanted_state) {
			return true;
		}
	}

	/* Each semaphore pair below uses an intentional fallthrough: the first
	 * case sets wanted_state = true, then falls into the shared comparison
	 * of its negated twin (which leaves wanted_state false).
	 */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			/* FALLTHROUGH */
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			/* FALLTHROUGH */
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case CRASHED:
			wanted_state = true;
			/* FALLTHROUGH */
		case DID_NOT_CRASH:
			if (j->crashed == wanted_state) {
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(NULL, si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(NULL, si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		}
	}

	return false;
}
 6233  
 6234  const char *
 6235  job_active(job_t j)
 6236  {
 6237  	if (j->p && j->shutdown_monitor) {
 6238  		return "Monitoring shutdown";
 6239  	}
 6240  	if (j->p) {
 6241  		return "PID is still valid";
 6242  	}
 6243  
 6244  	if (j->priv_port_has_senders) {
 6245  		return "Privileged Port still has outstanding senders";
 6246  	}
 6247  
 6248  	struct machservice *ms;
 6249  	SLIST_FOREACH(ms, &j->machservices, sle) {
 6250  		/* If we've simulated an exit, we mark the job as non-active, even
 6251  		 * though doing so will leave it in an unsafe state. We do this so that
 6252  		 * shutdown can proceed. See <rdar://problem/11126530>.
 6253  		 */
 6254  		if (!j->workaround9359725 && ms->recv && machservice_active(ms)) {
 6255  			job_log(j, LOG_INFO, "Mach service is still active: %s", ms->name);
 6256  			return "Mach service is still active";
 6257  		}
 6258  	}
 6259  
 6260  	return NULL;
 6261  }
 6262  
void
machservice_watch(job_t j, struct machservice *ms)
{
	/* Add the service's receive right to launchd's port set so incoming
	 * messages wake us. Only applies to ports we hold the receive right
	 * for. KERN_INVALID_RIGHT flags a race handled elsewhere via
	 * recv_race_hack.
	 */
	if (ms->recv) {
		if (job_assumes_zero(j, runtime_add_mport(ms->port, NULL)) == KERN_INVALID_RIGHT) {
			ms->recv_race_hack = true;
		}
	}
}
 6272  
void
machservice_ignore(job_t j, struct machservice *ms)
{
	/* We only add ports whose receive rights we control into the port set, so
	 * don't attempt to remove the service from the port set if we didn't put
	 * it there in the first place. Otherwise, we could wind up trying to
	 * access a bogus index (like MACH_PORT_DEAD) or zeroing a valid one out.
	 *
	 * <rdar://problem/10898014>
	 */
	if (ms->recv) {
		(void)job_assumes_zero(j, runtime_remove_mport(ms->port));
	}
}
 6287  
void
machservice_resetport(job_t j, struct machservice *ms)
{
	/* Destroy the service's current port and replace it with a fresh one,
	 * bumping the generation number. The port-hash entry must be re-filed
	 * because the hash is keyed on the port name.
	 */
	LIST_REMOVE(ms, port_hash_sle);
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	ms->gen_num++;
	(void)job_assumes_zero(j, launchd_mport_create_recv(&ms->port));
	(void)job_assumes_zero(j, launchd_mport_make_send(ms->port));
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
 6300  
void
machservice_stamp_port(job_t j, struct machservice *ms)
{
	/* Stamp the service port's kernel context with the first bytes of the
	 * owning program's basename, as a debugging breadcrumb visible in port
	 * inspection tools.
	 */
	mach_port_context_t ctx = 0;
	char *where2get = j->prog ? j->prog : j->argv[0];

	// Use only the basename of the program path.
	char *prog = NULL;
	if ((prog = strrchr(where2get, '/'))) {
		prog++;
	} else {
		prog = where2get;
	}

	/* strncpy into the integer is intentional: ctx is treated as a raw
	 * byte buffer of sizeof(ctx) characters; no NUL terminator is needed.
	 */
	(void)strncpy((char *)&ctx, prog, sizeof(ctx));
#if __LITTLE_ENDIAN__
	// Byte-swap so the name reads in order when the context is displayed.
#if __LP64__
	ctx = OSSwapBigToHostInt64(ctx);
#else
	ctx = OSSwapBigToHostInt32(ctx);
#endif
#endif

	(void)job_assumes_zero(j, mach_port_set_context(mach_task_self(), ms->port, ctx));
}
 6325  
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	/* Create a Mach service record for job `j`. If *serviceport is
	 * MACH_PORT_NULL, launchd creates the receive right and returns the
	 * port through *serviceport; otherwise the caller's existing send right
	 * is adopted and the service starts out active. Returns NULL on a dead
	 * port or allocation/port failure.
	 */

	/* Don't create new MachServices for dead ports. This is primarily for
	 * clients who use bootstrap_register2(). They can pass in a send right, but
	 * then that port can immediately go dead. Hilarity ensues.
	 *
	 * <rdar://problem/10898014>
	 */
	if (*serviceport == MACH_PORT_DEAD) {
		return NULL;
	}

	// Name is stored inline after the struct.
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
	if (!job_assumes(j, ms != NULL)) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->gen_num = 1;
	ms->per_pid = pid_local;

	if (likely(*serviceport == MACH_PORT_NULL)) {
		// launchd owns the receive right: on-demand launch is possible.
		if (job_assumes_zero(j, launchd_mport_create_recv(&ms->port)) != KERN_SUCCESS) {
			goto out_bad;
		}

		if (job_assumes_zero(j, launchd_mport_make_send(ms->port)) != KERN_SUCCESS) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		// Caller-supplied port: launchd cannot demand-launch, so the
		// service is considered active from the start.
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);

	jobmgr_t where2put = j->mgr;
	// XPC domains are separate from Mach bootstraps.
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		if (launchd_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2put = root_jobmgr;
		}
	}

	/* Don't allow MachServices added by multiple-instance jobs to be looked up
	 * by others. We could just do this with a simple bit, but then we'd have to
	 * uniquify the names ourselves to avoid collisions. This is just easier.
	 */
	if (!j->dedicated_instance) {
		LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	}
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	if (ms->recv) {
		machservice_stamp_port(j, ms);
	}

	job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);

	return ms;
out_bad2:
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
out_bad:
	free(ms);
	return NULL;
}
 6396  
struct machservice *
machservice_new_alias(job_t j, struct machservice *orig)
{
	/* Create an alias record that makes `orig` visible in job `j`'s
	 * manager under the same name; the alias owns no port of its own and
	 * defers to the original via ms->alias. Returns NULL on allocation
	 * failure.
	 */
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
	if (job_assumes(j, ms != NULL)) {
		strcpy((char *)ms->name, orig->name);
		ms->alias = orig;
		ms->job = j;

		LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
		SLIST_INSERT_HEAD(&j->machservices, ms, sle);
		jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
	}

	return ms;
}
 6413  
 6414  bootstrap_status_t
 6415  machservice_status(struct machservice *ms)
 6416  {
 6417  	ms = ms->alias ? ms->alias : ms;
 6418  	if (ms->isActive) {
 6419  		return BOOTSTRAP_STATUS_ACTIVE;
 6420  	} else if (ms->job->ondemand) {
 6421  		return BOOTSTRAP_STATUS_ON_DEMAND;
 6422  	} else {
 6423  		return BOOTSTRAP_STATUS_INACTIVE;
 6424  	}
 6425  }
 6426  
void
job_setup_exception_port(job_t j, task_t target_task)
{
	/* Install the crash/guard/resource exception handler for a task (or,
	 * when target_task is 0 and we are pid 1, host-wide). The handler port
	 * is chosen in priority order: the job's named alternate handler, its
	 * internal (launchd kernel-port) handler, or the global exception
	 * server. If none exists, do nothing.
	 */
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (unlikely(j->alt_exc_handler)) {
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (likely(ms)) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (unlikely(j->internal_exc_handler)) {
		exc_port = runtime_get_kernel_port();
	} else if (unlikely(!exc_port)) {
		return;
	}

	// Pick the thread-state flavor matching the build architecture.
#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (likely(target_task)) {
		kern_return_t kr = task_set_exception_ports(target_task, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f);
		if (kr) {
			// MACH_SEND_INVALID_DEST just means the task already exited.
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			} else {
				job_log(j, LOG_WARNING, "Task died before exception port could be set.");
			}
		}
	} else if (pid1_magic && the_exception_server) {
		// No target task and we are pid 1: set the host-level ports.
		mach_port_t mhp = mach_host_self();
		(void)job_assumes_zero(j, host_set_exception_ports(mhp, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f));
		(void)job_assumes_zero(j, launchd_mport_deallocate(mhp));
	}
}
 6472  
 6473  void
 6474  job_set_exception_port(job_t j, mach_port_t port)
 6475  {
 6476  	if (unlikely(!the_exception_server)) {
 6477  		the_exception_server = port;
 6478  		job_setup_exception_port(j, 0);
 6479  	} else {
 6480  		job_log(j, LOG_WARNING, "The exception server is already claimed!");
 6481  	}
 6482  }
 6483  
 6484  void
 6485  machservice_setup_options(launch_data_t obj, const char *key, void *context)
 6486  {
 6487  	struct machservice *ms = context;
 6488  	mach_port_t mhp = mach_host_self();
 6489  	int which_port;
 6490  	bool b;
 6491  
 6492  	if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
 6493  		return;
 6494  	}
 6495  
 6496  	switch (launch_data_get_type(obj)) {
 6497  	case LAUNCH_DATA_INTEGER:
 6498  		which_port = (int)launch_data_get_integer(obj); // XXX we should bound check this...
 6499  		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
 6500  			switch (which_port) {
 6501  			case TASK_KERNEL_PORT:
 6502  			case TASK_HOST_PORT:
 6503  			case TASK_NAME_PORT:
 6504  			case TASK_BOOTSTRAP_PORT:
 6505  			/* I find it a little odd that zero isn't reserved in the header.
 6506  			 * Normally Mach is fairly good about this convention...
 6507  			 */
 6508  			case 0:
 6509  				job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
 6510  				break;
 6511  			default:
 6512  				ms->special_port_num = which_port;
 6513  				SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
 6514  				break;
 6515  			}
 6516  		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
 6517  			if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
 6518  				(void)job_assumes_zero(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)));
 6519  			} else {
 6520  				job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
 6521  			}
 6522  		}
 6523  	case LAUNCH_DATA_BOOL:
 6524  		b = launch_data_get_bool(obj);
 6525  		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
 6526  			ms->debug_on_close = b;
 6527  		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
 6528  			ms->reset = b;
 6529  		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
 6530  			ms->hide = b;
 6531  		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
 6532  			job_set_exception_port(ms->job, ms->port);
 6533  		} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
 6534  			ms->kUNCServer = b;
 6535  			(void)job_assumes_zero(ms->job, host_set_UNDServer(mhp, ms->port));
 6536  		}
 6537  		break;
 6538  	case LAUNCH_DATA_STRING:
 6539  		if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
 6540  			const char *option = launch_data_get_string(obj);
 6541  			if (strcasecmp(option, "One") == 0) {
 6542  				ms->drain_one_on_crash = true;
 6543  			} else if (strcasecmp(option, "All") == 0) {
 6544  				ms->drain_all_on_crash = true;
 6545  			}
 6546  		}
 6547  		break;
 6548  	case LAUNCH_DATA_DICTIONARY:
 6549  		if (launch_data_dict_get_count(obj) == 0) {
 6550  			job_set_exception_port(ms->job, ms->port);
 6551  		}
 6552  		break;
 6553  	default:
 6554  		break;
 6555  	}
 6556  
 6557  	(void)job_assumes_zero(ms->job, launchd_mport_deallocate(mhp));
 6558  }
 6559  
 6560  void
 6561  machservice_setup(launch_data_t obj, const char *key, void *context)
 6562  {
 6563  	job_t j = context;
 6564  	struct machservice *ms;
 6565  	mach_port_t p = MACH_PORT_NULL;
 6566  
 6567  	if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
 6568  		job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
 6569  		return;
 6570  	}
 6571  
 6572  	if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
 6573  		return;
 6574  	}
 6575  
 6576  	ms->isActive = false;
 6577  	ms->upfront = true;
 6578  
 6579  	if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
 6580  		launch_data_dict_iterate(obj, machservice_setup_options, ms);
 6581  	}
 6582  
 6583  	kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
 6584  	(void)job_assumes_zero(j, kr);
 6585  }
 6586  
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	/* Drive a shutting-down job manager toward removal. Recurses into
	 * submanagers first, then stops/removes this manager's jobs in phases:
	 * normal jobs exit first, then dirty-at-shutdown jobs, then the shutdown
	 * monitor last of all. Returns `jm` while it still exists, or NULL once
	 * the manager has been removed.
	 */
	jobmgr_t jmi = NULL, jmn = NULL;
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	// Nothing to collect unless this manager is actually shutting down.
	if (!jm->shutting_down) {
		return jm;
	}

	if (SLIST_EMPTY(&jm->submgrs)) {
		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
		SLIST_FOREACH(jmi, &jm->submgrs, sle) {
			jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
		}
	}

	// Count of jobs still active that are NOT dirty-at-shutdown.
	size_t actives = 0;
	job_t ji = NULL, jn = NULL;
	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
		if (ji->anonymous) {
			continue;
		}

		// Let the shutdown monitor be up until the very end.
		if (ji->shutdown_monitor) {
			continue;
		}

		/* On our first pass through, open a transaction for all the jobs that
		 * need to be dirty at shutdown. We'll close these transactions once the
		 * jobs that do not need to be dirty at shutdown have all exited.
		 */
		if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
			job_open_shutdown_transaction(ji);
		}

		const char *active = job_active(ji);
		if (!active) {
			job_remove(ji);
		} else {
			job_log(ji, LOG_DEBUG, "Job is active: %s", active);
			job_stop(ji);

			if (!ji->dirty_at_shutdown) {
				actives++;
			}

			if (ji->clean_kill) {
				job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
			} else {
				job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
			}
		}
	}

	jm->shutdown_jobs_dirtied = true;
	if (actives == 0) {
		if (!jm->shutdown_jobs_cleaned) {
			/* Once all normal jobs have exited, we clean the dirty-at-shutdown
			 * jobs and make them into normal jobs so that the above loop will
			 * handle them appropriately.
			 */
			LIST_FOREACH(ji, &jm->jobs, sle) {
				if (ji->anonymous) {
					continue;
				}

				if (!job_active(ji)) {
					continue;
				}

				if (ji->shutdown_monitor) {
					continue;
				}

				job_close_shutdown_transaction(ji);
				actives++;
			}

			jm->shutdown_jobs_cleaned = true;
		}

		if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
			/* We may be in a situation where the shutdown monitor is all that's
			 * left, in which case we want to stop it. Like dirty-at-shutdown
			 * jobs, we turn it back into a normal job so that the main loop
			 * treats it appropriately.
			 *
			 * See:
			 * <rdar://problem/10756306>
			 * <rdar://problem/11034971>
			 * <rdar://problem/11549541>
			 */
			if (jm->monitor_shutdown && _launchd_shutdown_monitor) {
				/* The rest of shutdown has completed, so we can kill the shutdown
				 * monitor now like it was any other job.
				 */
				_launchd_shutdown_monitor->shutdown_monitor = false;

				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
				job_stop(_launchd_shutdown_monitor);
				_launchd_shutdown_monitor = NULL;
			} else {
				jobmgr_log(jm, LOG_DEBUG, "Removing.");
				jobmgr_remove(jm);
				return NULL;
			}
		}
	}

	return jm;
}
 6704  
 6705  void
 6706  jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
 6707  {
 6708  	/* I maintain that stray processes should be at the mercy of launchd during
 6709  	 * shutdown, but nevertheless, things like diskimages-helper can stick
 6710  	 * around, and SIGKILLing them can result in data loss. So we send SIGTERM
 6711  	 * to all the strays and don't wait for them to exit before moving on.
 6712  	 * 
 6713  	 * See rdar://problem/6562592
 6714  	 */
 6715  	size_t i = 0;
 6716  	for (i = 0; i < np; i++) {
 6717  		if (p[i] != 0) {
 6718  			jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
 6719  			(void)jobmgr_assumes_zero_p(jm, kill2(p[i], SIGTERM));
 6720  		}
 6721  	}
 6722  }
 6723  
 6724  void
 6725  jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
 6726  {
 6727  	size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
 6728  	pid_t *pids = NULL;
 6729  	int i = 0, kp_cnt = 0;
 6730  
 6731  	if (likely(jm->parentmgr || !pid1_magic)) {
 6732  		return;
 6733  	}
 6734  
 6735  	if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
 6736  		return;
 6737  	}
 6738  
 6739  	runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
 6740  
 6741  	if (jobmgr_assumes_zero_p(jm, (kp_cnt = proc_listallpids(pids, len))) == -1) {
 6742  		goto out;
 6743  	}
 6744  
 6745  	pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
 6746  	for (i = 0; i < kp_cnt; i++) {
 6747  		struct proc_bsdshortinfo proc;
 6748  		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
 6749  			if (errno != ESRCH) {
 6750  				(void)jobmgr_assumes_zero(jm, errno);
 6751  			}
 6752  
 6753  			kp_skipped++;
 6754  			continue;
 6755  		}
 6756  
 6757  		pid_t p_i = pids[i];
 6758  		pid_t pp_i = proc.pbsi_ppid;
 6759  		pid_t pg_i = proc.pbsi_pgid;
 6760  		const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
 6761  		const char *n = proc.pbsi_comm;
 6762  
 6763  		if (unlikely(p_i == 0 || p_i == 1)) {
 6764  			kp_skipped++;
 6765  			continue;
 6766  		}
 6767  
 6768  		if (_launchd_shutdown_monitor && pp_i == _launchd_shutdown_monitor->p) {
 6769  			kp_skipped++;
 6770  			continue;
 6771  		}
 6772  
 6773  		// We might have some jobs hanging around that we've decided to shut down in spite of.
 6774  		job_t j = jobmgr_find_by_pid(jm, p_i, false);
 6775  		if (!j || (j && j->anonymous)) {
 6776  			jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
 6777  
 6778  			int status = 0;
 6779  			if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
 6780  				if (jobmgr_assumes_zero(jm, waitpid(p_i, &status, WNOHANG)) == 0) {
 6781  					jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
 6782  				}
 6783  				kp_skipped++;
 6784  			} else {
 6785  				job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
 6786  				/* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
 6787  				 * so we don't want to terminate them. Long-term, I'd really like to provide shutdown
 6788  				 * hints to the kernel along the way, so that it could shutdown certain subsystems when
 6789  				 * their userspace emissaries go away, before the call to reboot(2).
 6790  				 */
 6791  				if (leader && leader->ignore_pg_at_shutdown) {
 6792  					kp_skipped++;
 6793  				} else {
 6794  					ps[i] = p_i;
 6795  				}
 6796  			}
 6797  		} else {
 6798  			kp_skipped++;
 6799  		}
 6800  	}
 6801  
 6802  	if ((kp_cnt - kp_skipped > 0) && kill_strays) {
 6803  		jobmgr_kill_stray_children(jm, ps, kp_cnt - kp_skipped);
 6804  	}
 6805  
 6806  	free(ps);
 6807  out:
 6808  	free(pids);
 6809  }
 6810  
// Accessor: the parent of this job manager, or NULL for the root manager.
jobmgr_t 
jobmgr_parent(jobmgr_t jm)
{
	return jm->parentmgr;
}
 6816  
 6817  void
 6818  job_uncork_fork(job_t j)
 6819  {
 6820  	pid_t c = j->p;
 6821  
 6822  	job_log(j, LOG_DEBUG, "Uncorking the fork().");
 6823  	/* this unblocks the child and avoids a race
 6824  	 * between the above fork() and the kevent_mod() */
 6825  	(void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
 6826  	(void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
 6827  	j->fork_fd = 0;
 6828  }
 6829  
jobmgr_t 
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
{
	/* Create a new job manager (Mach sub-bootstrap). `jm` is the parent
	 * manager, or NULL when creating the root manager. `requestorport` is
	 * the requesting client's port (mandatory for sub-bootstraps);
	 * `transfer_port` optionally supplies an existing receive right to serve
	 * on. `name`, unless skip_init is set, causes a session bootstrapper
	 * job to be spawned. `asport` is the audit session port to associate.
	 * Returns the new manager, or NULL on failure (after tearing down any
	 * partially-constructed state).
	 */
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	// The kqueue callback dispatch relies on the callback being first.
	__OS_COMPILETIME_ASSERT__(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));

	if (!jobmgr_assumes(jm, jmr != NULL)) {
		return NULL;
	}

	// No parent means this is the root manager.
	if (jm == NULL) {
		root_jobmgr = jmr;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	// Watch the requestor port so the sub-bootstrap dies with its requestor.
	if (jm && jobmgr_assumes_zero(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME)) != KERN_SUCCESS) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		// Adopt the receive right handed to us.
		(void)jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && !pid1_magic) {
		// Per-user launchd: check in with the real bootstrap for our port.
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (jobmgr_assumes_zero(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port)) != 0) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);

			// dup() probes whether the inherited fd is still valid before closing.
			if ((dfd = dup(lfd)) >= 0) {
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(dfd));
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(lfd));
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		// cut off the Libc cache, we don't want to deadlock against ourself
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		os_assert_zero(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME));

		// We set this explicitly as we start each child
		os_assert_zero(launchd_set_bport(MACH_PORT_NULL));
	} else if (jobmgr_assumes_zero(jmr, launchd_mport_create_recv(&jmr->jm_port)) != KERN_SUCCESS) {
		goto out_bad;
	}

	// Anonymous managers are named after their port index.
	if (!name) {
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	// The root manager owns the global signal and filesystem event sources.
	if (!jm) {
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGINFO, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr));
	}

	if (name && !skip_init) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (jobmgr_assumes_zero(jmr, runtime_add_mport(jmr->jm_port, job_server)) != KERN_SUCCESS) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		bootstrapper->asport = asport;

		jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
		(void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	} else {
		jmr->req_asport = asport;
	}

	if (asport != MACH_PORT_NULL) {
		(void)jobmgr_assumes_zero(jmr, launchd_mport_copy_send(asport));
	}

	if (jmr->parentmgr) {
		runtime_add_weak_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
		if (jm == NULL) {
			root_jobmgr = NULL;
		}
	}
	return NULL;
}
 6953  
 6954  jobmgr_t
 6955  jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
 6956  {
 6957  	jobmgr_t new = NULL;
 6958  
 6959  	/* These job managers are basically singletons, so we use the root Mach
 6960  	 * bootstrap port as their requestor ports so they'll never go away.
 6961  	 */
 6962  	mach_port_t req_port = root_jobmgr->jm_port;
 6963  	if (jobmgr_assumes_zero(jm, launchd_mport_make_send(req_port)) == KERN_SUCCESS) {
 6964  		new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
 6965  		if (new) {
 6966  			new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
 6967  			new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
 6968  			new->xpc_singleton = true;
 6969  		}
 6970  	}
 6971  
 6972  	return new;
 6973  }
 6974  
jobmgr_t
jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
{
	/* Find the XPC per-user domain for `uid`, creating it (and the per-user
	 * launchd backing it) on first use. Returns NULL if creation fails.
	 */
	jobmgr_t jmi = NULL;
	LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
		if (jmi->req_euid == uid) {
			return jmi;
		}
	}

	name_t name;
	(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
	jmi = jobmgr_new_xpc_singleton_domain(jm, name);
	if (jobmgr_assumes(jm, jmi != NULL)) {
		/* We need to create a per-user launchd for this UID if there isn't one
		 * already so we can grab the bootstrap port.
		 */
		job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
		if (jobmgr_assumes(jmi, puj != NULL)) {
			// Take our own send-right references on the session and bootstrap ports.
			(void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(puj->asport));
			(void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(jmi->req_bsport));
			jmi->shortdesc = "per-user";
			jmi->req_asport = puj->asport;
			jmi->req_asid = puj->asid;
			jmi->req_euid = uid;
			jmi->req_egid = -1;

			LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
		} else {
			// Couldn't bring up the backing per-user launchd; discard the domain.
			jobmgr_remove(jmi);
		}
	}

	return jmi;
}
 7010  
 7011  jobmgr_t
 7012  jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
 7013  {
 7014  	jobmgr_t jmi = NULL;
 7015  	LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
 7016  		if (jmi->req_asid == asid) {
 7017  			return jmi;
 7018  		}
 7019  	}
 7020  
 7021  	name_t name;
 7022  	(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
 7023  	jmi = jobmgr_new_xpc_singleton_domain(jm, name);
 7024  	if (jobmgr_assumes(jm, jmi != NULL)) {
 7025  		(void)jobmgr_assumes_zero(jmi, launchd_mport_make_send(root_jobmgr->jm_port));
 7026  		jmi->shortdesc = "per-session";
 7027  		jmi->req_bsport = root_jobmgr->jm_port;
 7028  		(void)jobmgr_assumes_zero(jmi, audit_session_port(asid, &jmi->req_asport));
 7029  		jmi->req_asid = asid;
 7030  		jmi->req_euid = -1;
 7031  		jmi->req_egid = -1;
 7032  
 7033  		LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
 7034  	} else {
 7035  		jobmgr_remove(jmi);
 7036  	}
 7037  
 7038  	return jmi;
 7039  }
 7040  
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	/* Spawn the bootstrapper job (`launchctl bootstrap -S <session_type>`)
	 * for a newly-created job manager. `sflag` appends launchctl's -s flag.
	 * Returns the bootstrapper job, or NULL if job_new() failed.
	 */
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);

	if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
		// Non-system session (or non-PID-1 launchd): "weird" bootstrap path.
		bootstrapper->is_bootstrapper = true;
		char buf[100];

		// <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs)
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
		bootstrapper->weird_bootstrap = true;
		(void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	} else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
		// System session bootstrapped by PID 1.
#if TARGET_OS_EMBEDDED
		bootstrapper->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
#endif
		bootstrapper->is_bootstrapper = true;
		if (jobmgr_assumes(jm, pid1_magic)) {
			// Have our system bootstrapper print out to the console.
			bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
			bootstrapper->stderrpath = strdup(_PATH_CONSOLE);

			if (launchd_console) {
				// Watch for the console device being revoked out from under us.
				(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)fileno(launchd_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm));
			}
		}
	}

	jm->session_initialized = true;
	return bootstrapper;
}
 7079  
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	/* Purge every reference to `port` from this job manager and, recursively,
	 * its submanagers. Returns `jm` if it survives, or the result of
	 * jobmgr_shutdown() when the dead port was this manager's requestor (or
	 * the inherited bootstrap of the root manager).
	 */
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right to a
	 * port we already have open, the Mach kernel gives us the same port number
	 * back and increments an reference count associated with the port.
	 * This forces us, when discovering that a receive right at the other end
	 * has been deleted, to wander all of our objects to see what weird places
	 * clients might have handed us the same send right to use.
	 */

	if (jm == root_jobmgr) {
		if (port == inherited_bootstrap_port) {
			// Our parent bootstrap died; the whole tree comes down.
			(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(port));
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		// Drop foreign (send-right-only) services registered under this port.
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port && !ms->recv) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
		return jobmgr_shutdown(jm);
	}

	// Clean up any pending attach-waiter tied to the dead port.
	struct waiting4attach *w4ai = NULL;
	struct waiting4attach *w4ait = NULL;
	LIST_FOREACH_SAFE(w4ai, &jm->attaches, le, w4ait) {
		if (port == w4ai->port) {
			waiting4attach_delete(jm, w4ai);
			break;
		}
	}

	return jm;
}
 7130  
struct machservice *
jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
{
	/* Look up the Mach service `name`. With a non-zero `target_pid`, search
	 * that process's per-PID services instead of the bootstrap namespace.
	 * `check_parent` allows the search to recurse up through parent managers.
	 * Returns the service, or NULL when not found.
	 */
	struct machservice *ms;
	job_t target_j;

	jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);

	if (target_pid) {
		/* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
		 * bootstrap in other bootstraps.
		 */

		// Start in the given bootstrap.
		if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
			// If we fail, do a deep traversal.
			if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
				jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
				return NULL;
			}
		}

		SLIST_FOREACH(ms, &target_j->machservices, sle) {
			if (ms->per_pid && strcmp(name, ms->name) == 0) {
				return ms;
			}
		}

		job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
		return NULL;
	}

	jobmgr_t where2look = jm;
	// XPC domains are separate from Mach bootstraps.
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		// In a flat namespace, non-subset lookups resolve against the root.
		if (launchd_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2look = root_jobmgr;
		}
	}

	LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
		if (!ms->per_pid && strcmp(name, ms->name) == 0) {
			return ms;
		}
	}

	// Not found here; optionally walk up to the parent bootstrap.
	if (jm->parentmgr == NULL || !check_parent) {
		return NULL;
	}

	return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
}
 7183  
// Accessor: the service's Mach port.
mach_port_t
machservice_port(struct machservice *ms)
{
	return ms->port;
}
 7189  
// Accessor: the job that owns this service.
job_t 
machservice_job(struct machservice *ms)
{
	return ms->job;
}
 7195  
// Accessor: whether the service is hidden (until check-in).
bool
machservice_hidden(struct machservice *ms)
{
	return ms->hide;
}
 7201  
// Accessor: whether the service is currently active.
bool
machservice_active(struct machservice *ms)
{
	return ms->isActive;
}
 7207  
// Accessor: the service's registered name.
const char *
machservice_name(struct machservice *ms)
{
	return ms->name;
}
 7213  
void
machservice_drain_port(struct machservice *ms)
{
	/* Drain queued messages from a crashed job's service port, per its
	 * DrainMessagesOnCrash setting: either one message or all of them.
	 * Exception-handler ports get their messages answered via the exception
	 * runtime; ordinary ports have their messages received and destroyed.
	 */
	bool drain_one = ms->drain_one_on_crash;
	bool drain_all = ms->drain_all_on_crash;

	if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
		return;
	}

	job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);

	char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
	char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
	mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
	mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;

	// Seed with a non-success value so the loop runs at least once.
	mach_msg_return_t mr = ~MACH_MSG_SUCCESS;

	do {
		/* This should be a direct check on the Mach service to see if it's an exception-handling
		 * port, and it will break things if ReportCrash or SafetyNet start advertising other
		 * Mach services. But for now, it should be okay.
		 */
		if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
			mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
		} else {
			// Non-blocking receive: a zero timeout with MACH_RCV_TIMEOUT.
			mach_msg_options_t options =	MACH_RCV_MSG		|
											MACH_RCV_TIMEOUT	;

			mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
			switch (mr) {
			case MACH_MSG_SUCCESS:
				// Release any rights/memory carried by the drained message.
				mach_msg_destroy((mach_msg_header_t *)req_hdr);
				break;
			case MACH_RCV_TIMED_OUT:
				// Queue is empty.
				break;
			case MACH_RCV_TOO_LARGE:
				launchd_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
				break;
			default:
				break;
			}
		}
	} while (drain_all && mr != MACH_RCV_TIMED_OUT);
}
 7260  
void
machservice_delete(job_t j, struct machservice *ms, bool port_died)
{
	/* Tear down a Mach service: release its port rights, unhook it from all
	 * lists and hash tables, and free it. `port_died` is only used for the
	 * log message. Aliases are simply unlinked and freed.
	 */
	if (ms->alias) {
		/* HACK: Egregious code duplication. But dealing with aliases is a
		 * pretty simple affair since they can't and shouldn't have any complex
		 * behaviors associated with them.
		 */
		LIST_REMOVE(ms, name_hash_sle);
		SLIST_REMOVE(&j->machservices, ms, machservice, sle);
		free(ms);
		return;
	}

	if (unlikely(ms->debug_on_close)) {
		// NOTE(review): the mach_host_self() send right is never deallocated
		// here — presumably acceptable since we're dropping into the debugger.
		job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
		(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
	}

	if (ms->recv && job_assumes(j, !machservice_active(ms))) {
		job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
		(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	}

	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	// If this port was serving as the global exception server, retire that role.
	if (unlikely(ms->port == the_exception_server)) {
		the_exception_server = 0;
	}

	job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);

	if (ms->special_port_num) {
		SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
	}
	SLIST_REMOVE(&j->machservices, ms, machservice, sle);

	if (!(j->dedicated_instance || ms->event_channel)) {
		LIST_REMOVE(ms, name_hash_sle);
	}
	LIST_REMOVE(ms, port_hash_sle);

	free(ms);
}
 7305  
 7306  void
 7307  machservice_request_notifications(struct machservice *ms)
 7308  {
 7309  	mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
 7310  
 7311  	ms->isActive = true;
 7312  
 7313  	if (ms->recv) {
 7314  		which = MACH_NOTIFY_PORT_DESTROYED;
 7315  		job_checkin(ms->job);
 7316  	}
 7317  
 7318  	(void)job_assumes_zero(ms->job, launchd_mport_notify_req(ms->port, which));
 7319  }
 7320  
// Element count of a fixed-size array, and a pointer one past its last element.
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
#define END_OF(x) (&(x)[NELEM(x)])
 7323  
 7324  char **
 7325  mach_cmd2argv(const char *string)
 7326  {
 7327  	char *argv[100], args[1000];
 7328  	const char *cp;
 7329  	char *argp = args, term, **argv_ret, *co;
 7330  	unsigned int nargs = 0, i;
 7331  
 7332  	for (cp = string; *cp;) {
 7333  		while (isspace(*cp))
 7334  			cp++;
 7335  		term = (*cp == '"') ? *cp++ : '\0';
 7336  		if (nargs < NELEM(argv)) {
 7337  			argv[nargs++] = argp;
 7338  		}
 7339  		while (*cp && (term ? *cp != term : !isspace(*cp)) && argp < END_OF(args)) {
 7340  			if (*cp == '\\') {
 7341  				cp++;
 7342  			}
 7343  			*argp++ = *cp;
 7344  			if (*cp) {
 7345  				cp++;
 7346  			}
 7347  		}
 7348  		*argp++ = '\0';
 7349  	}
 7350  	argv[nargs] = NULL;
 7351  
 7352  	if (nargs == 0) {
 7353  		return NULL;
 7354  	}
 7355  
 7356  	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);
 7357  
 7358  	if (!argv_ret) {
 7359  		(void)os_assumes_zero(errno);
 7360  		return NULL;
 7361  	}
 7362  
 7363  	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);
 7364  
 7365  	for (i = 0; i < nargs; i++) {
 7366  		strcpy(co, argv[i]);
 7367  		argv_ret[i] = co;
 7368  		co += strlen(argv[i]) + 1;
 7369  	}
 7370  	argv_ret[i] = NULL;
 7371  
 7372  	return argv_ret;
 7373  }
 7374  
/* Record that the job has checked in with launchd (set when a service's
 * receive right becomes active; see machservice_request_notifications()).
 */
void
job_checkin(job_t j)
{
	j->checkedin = true;
}
 7380  
/* Whether this job has the "embedded god" (root-equivalent) flag; also
 * reported via VPROC_GSK_EMBEDDEDROOTEQUIVALENT in job_mig_swap_integer().
 */
bool job_is_god(job_t j)
{
	return j->embedded_god;
}
 7385  
 7386  bool
 7387  job_ack_port_destruction(mach_port_t p)
 7388  {
 7389  	struct machservice *ms;
 7390  	job_t j;
 7391  
 7392  	LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
 7393  		if (ms->recv && (ms->port == p)) {
 7394  			break;
 7395  		}
 7396  	}
 7397  
 7398  	if (!ms) {
 7399  		launchd_syslog(LOG_WARNING, "Could not find MachService to match receive right: 0x%x", p);
 7400  		return false;
 7401  	}
 7402  
 7403  	j = ms->job;
 7404  
 7405  	jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
 7406  
 7407  	/* Without being the exception handler, NOTE_EXIT is our only way to tell if
 7408  	 * the job  crashed, and we can't rely on NOTE_EXIT always being processed
 7409  	 * after all the job's receive rights have been returned.
 7410  	 *
 7411  	 * So when we get receive rights back, check to see if the job has been
 7412  	 * reaped yet. If not, then we add this service to a list of services to be
 7413  	 * drained on crash if it's requested that behavior. So, for a job with N
 7414  	 * receive rights all requesting that they be drained on crash, we can
 7415  	 * safely handle the following sequence of events.
 7416  	 * 
 7417  	 * ReceiveRight0Returned
 7418  	 * ReceiveRight1Returned
 7419  	 * ReceiveRight2Returned
 7420  	 * NOTE_EXIT (reap, get exit status)
 7421  	 * ReceiveRight3Returned
 7422  	 * .
 7423  	 * .
 7424  	 * .
 7425  	 * ReceiveRight(N - 1)Returned
 7426  	 */
 7427  	if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
 7428  		if (j->crashed && j->reaped) {
 7429  			job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
 7430  			machservice_drain_port(ms);
 7431  		} else if (!(j->crashed || j->reaped)) {
 7432  			job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
 7433  		}
 7434  	}
 7435  
 7436  	ms->isActive = false;
 7437  	if (ms->delete_on_destruction) {
 7438  		machservice_delete(j, ms, false);
 7439  	} else if (ms->reset) {
 7440  		machservice_resetport(j, ms);
 7441  	}
 7442  
 7443  	kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
 7444  	(void)job_assumes_zero(j, kr);
 7445  	machservice_stamp_port(j, ms);
 7446  	job_dispatch(j, false);
 7447  
 7448  	if (ms->recv_race_hack) {
 7449  		ms->recv_race_hack = false;
 7450  		machservice_watch(ms->job, ms);
 7451  	}
 7452  
 7453  	root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
 7454  
 7455  	return true;
 7456  }
 7457  
/*
 * No-more-senders notification for the job's privileged bootstrap port:
 * drop our receive right, clear the port, and re-evaluate the job.
 */
void
job_ack_no_senders(job_t j)
{
	j->priv_port_has_senders = false;

	(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	j->j_port = 0;

	job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");

	job_dispatch(j, false);
}
 7470  
 7471  bool
 7472  semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
 7473  {
 7474  	struct semaphoreitem *si;
 7475  	size_t alloc_sz = sizeof(struct semaphoreitem);
 7476  
 7477  	if (what) {
 7478  		alloc_sz += strlen(what) + 1;
 7479  	}
 7480  
 7481  	if (job_assumes(j, si = calloc(1, alloc_sz)) == NULL) {
 7482  		return false;
 7483  	}
 7484  
 7485  	si->why = why;
 7486  
 7487  	if (what) {
 7488  		strcpy(si->what_init, what);
 7489  	}
 7490  
 7491  	SLIST_INSERT_HEAD(&j->semaphores, si, sle);
 7492  
 7493  	if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
 7494  		job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
 7495  		SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
 7496  		j->nosy = true;
 7497  	}
 7498  
 7499  	semaphoreitem_runtime_mod_ref(si, true);
 7500  
 7501  	return true;
 7502  }
 7503  
 7504  void
 7505  semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
 7506  {
 7507  	/*
 7508  	 * External events need to be tracked.
 7509  	 * Internal events do NOT need to be tracked.
 7510  	 */
 7511  
 7512  	switch (si->why) {
 7513  	case SUCCESSFUL_EXIT:
 7514  	case FAILED_EXIT:
 7515  	case OTHER_JOB_ENABLED:
 7516  	case OTHER_JOB_DISABLED:
 7517  	case OTHER_JOB_ACTIVE:
 7518  	case OTHER_JOB_INACTIVE:
 7519  		return;
 7520  	default:
 7521  		break;
 7522  	}
 7523  
 7524  	if (add) {
 7525  		runtime_add_weak_ref();
 7526  	} else {
 7527  		runtime_del_weak_ref();
 7528  	}
 7529  }
 7530  
 7531  void
 7532  semaphoreitem_delete(job_t j, struct semaphoreitem *si)
 7533  {
 7534  	semaphoreitem_runtime_mod_ref(si, false);
 7535  
 7536  	SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
 7537  
 7538  	// We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores.
 7539  	if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
 7540  		j->nosy = false;
 7541  		SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
 7542  	}
 7543  
 7544  	free(si);
 7545  }
 7546  
 7547  void
 7548  semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
 7549  {
 7550  	struct semaphoreitem_dict_iter_context *sdic = context;
 7551  	semaphore_reason_t why;
 7552  
 7553  	why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
 7554  
 7555  	semaphoreitem_new(sdic->j, why, key);
 7556  }
 7557  
 7558  void
 7559  semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
 7560  {
 7561  	struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
 7562  	job_t j = context;
 7563  	semaphore_reason_t why;
 7564  
 7565  	switch (launch_data_get_type(obj)) {
 7566  	case LAUNCH_DATA_BOOL:
 7567  		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
 7568  			why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
 7569  			semaphoreitem_new(j, why, NULL);
 7570  		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
 7571  			why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
 7572  			semaphoreitem_new(j, why, NULL);
 7573  			j->start_pending = true;
 7574  		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
 7575  			j->needs_kickoff = launch_data_get_bool(obj);
 7576  		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
 7577  			why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
 7578  			semaphoreitem_new(j, why, NULL);
 7579  			j->start_pending = true;
 7580  		} else {
 7581  			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
 7582  		}
 7583  		break;
 7584  	case LAUNCH_DATA_DICTIONARY:
 7585  		if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
 7586  			sdic.why_true = OTHER_JOB_ACTIVE;
 7587  			sdic.why_false = OTHER_JOB_INACTIVE;
 7588  		} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
 7589  			sdic.why_true = OTHER_JOB_ENABLED;
 7590  			sdic.why_false = OTHER_JOB_DISABLED;
 7591  		} else {
 7592  			job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
 7593  			break;
 7594  		}
 7595  
 7596  		launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
 7597  		break;
 7598  	default:
 7599  		job_log(j, LOG_ERR, "Unrecognized KeepAlive type: %u", launch_data_get_type(obj));
 7600  		break;
 7601  	}
 7602  }
 7603  
 7604  bool
 7605  externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags)
 7606  {
 7607  	if (j->event_monitor) {
 7608  		job_log(j, LOG_ERR, "The event monitor job cannot use LaunchEvents or XPC Events.");
 7609  		return false;
 7610  	}
 7611  
 7612  	struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
 7613  	if (!ee) {
 7614  		return false;
 7615  	}
 7616  
 7617  	ee->event = xpc_retain(event);
 7618  	(void)strcpy(ee->name, evname);
 7619  	ee->job = j;
 7620  	ee->id = sys->curid;
 7621  	ee->sys = sys;
 7622  	ee->state = false;
 7623  	ee->wanted_state = true;
 7624  	sys->curid++;
 7625  
 7626  	if (flags & XPC_EVENT_FLAG_ENTITLEMENTS) {
 7627  		struct ldcred *ldc = runtime_get_caller_creds();
 7628  		if (ldc) {
 7629  			ee->entitlements = xpc_copy_entitlements_for_pid(ldc->pid);
 7630  		}
 7631  	}
 7632  
 7633  	if (sys == _launchd_support_system) {
 7634  		ee->internal = true;
 7635  	}
 7636  
 7637  	LIST_INSERT_HEAD(&j->events, ee, job_le);
 7638  	LIST_INSERT_HEAD(&sys->events, ee, sys_le);
 7639  
 7640  	job_log(j, LOG_DEBUG, "New event: %s/%s", sys->name, evname);
 7641  
 7642  	eventsystem_ping();
 7643  	return true;
 7644  }
 7645  
 7646  void
 7647  externalevent_delete(struct externalevent *ee)
 7648  {
 7649  	xpc_release(ee->event);
 7650  	if (ee->entitlements) {
 7651  		xpc_release(ee->entitlements);
 7652  	}
 7653  	LIST_REMOVE(ee, job_le);
 7654  	LIST_REMOVE(ee, sys_le);
 7655  
 7656  	free(ee);
 7657  
 7658  	eventsystem_ping();
 7659  }
 7660  
 7661  void
 7662  externalevent_setup(launch_data_t obj, const char *key, void *context)
 7663  {
 7664  	/* This method can ONLY be called on the job_import() path, as it assumes
 7665  	 * the input is a launch_data_t.
 7666  	 */
 7667  	struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
 7668  
 7669  	xpc_object_t xobj = ld2xpc(obj);
 7670  	if (xobj) {
 7671  		job_log(ctx->j, LOG_DEBUG, "Importing stream/event: %s/%s", ctx->sys->name, key);
 7672  		externalevent_new(ctx->j, ctx->sys, key, xobj, 0);
 7673  		xpc_release(xobj);
 7674  	} else {
 7675  		job_log(ctx->j, LOG_ERR, "Could not import event for job: %s", key);
 7676  	}
 7677  }
 7678  
 7679  struct externalevent *
 7680  externalevent_find(const char *sysname, uint64_t id)
 7681  {
 7682  	struct externalevent *ei = NULL;
 7683  
 7684  	struct eventsystem *es = eventsystem_find(sysname);
 7685  	if (es != NULL) {
 7686  		LIST_FOREACH(ei, &es->events, sys_le) {
 7687  			if (ei->id == id) {
 7688  				break;
 7689  			}
 7690  		}
 7691  	} else {
 7692  		launchd_syslog(LOG_ERR, "Could not find event system: %s", sysname);
 7693  	}
 7694  
 7695  	return ei;
 7696  }
 7697  
 7698  struct eventsystem *
 7699  eventsystem_new(const char *name)
 7700  {
 7701  	struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
 7702  	if (es != NULL) {
 7703  		es->curid = 1;
 7704  		(void)strcpy(es->name, name);
 7705  		LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
 7706  	} else {
 7707  		(void)os_assumes_zero(errno);
 7708  	}
 7709  
 7710  	return es;
 7711  }
 7712  
 7713  void
 7714  eventsystem_delete(struct eventsystem *es)
 7715  {
 7716  	struct externalevent *ei = NULL;
 7717  	while ((ei = LIST_FIRST(&es->events))) {
 7718  		externalevent_delete(ei);
 7719  	}
 7720  
 7721  	LIST_REMOVE(es, global_le);
 7722  
 7723  	free(es);
 7724  }
 7725  
 7726  void
 7727  eventsystem_setup(launch_data_t obj, const char *key, void *context)
 7728  {
 7729  	job_t j = (job_t)context;
 7730  	if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
 7731  		return;
 7732  	}
 7733  
 7734  	struct eventsystem *sys = eventsystem_find(key);
 7735  	if (unlikely(sys == NULL)) {
 7736  		sys = eventsystem_new(key);
 7737  		job_log(j, LOG_DEBUG, "New event system: %s", key);
 7738  	}
 7739  
 7740  	if (job_assumes(j, sys != NULL)) {
 7741  		struct externalevent_iter_ctx ctx = {
 7742  			.j = j,
 7743  			.sys = sys,
 7744  		};
 7745  
 7746  		job_log(j, LOG_DEBUG, "Importing events for stream: %s", key);
 7747  		launch_data_dict_iterate(obj, externalevent_setup, &ctx);
 7748  	}
 7749  }
 7750  
 7751  struct eventsystem *
 7752  eventsystem_find(const char *name)
 7753  {
 7754  	struct eventsystem *esi = NULL;
 7755  	LIST_FOREACH(esi, &_s_event_systems, global_le) {
 7756  		if (strcmp(name, esi->name) == 0) {
 7757  			break;
 7758  		}
 7759  	}
 7760  
 7761  	return esi;
 7762  }
 7763  
 7764  void
 7765  eventsystem_ping(void)
 7766  {
 7767  	if (!_launchd_event_monitor) {
 7768  		return;
 7769  	}
 7770  
 7771  	if (!_launchd_event_monitor->p) {
 7772  		(void)job_dispatch(_launchd_event_monitor, true);
 7773  	} else {
 7774  		if (_launchd_event_monitor->event_monitor_ready2signal) {
 7775  			(void)job_assumes_zero_p(_launchd_event_monitor, kill(_launchd_event_monitor->p, SIGUSR1));
 7776  		}
 7777  	}
 7778  }
 7779  
 7780  void
 7781  jobmgr_dispatch_all_semaphores(jobmgr_t jm)
 7782  {
 7783  	jobmgr_t jmi, jmn;
 7784  	job_t ji, jn;
 7785  
 7786  
 7787  	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
 7788  		jobmgr_dispatch_all_semaphores(jmi);
 7789  	}
 7790  
 7791  	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
 7792  		if (!SLIST_EMPTY(&ji->semaphores)) {
 7793  			job_dispatch(ji, false);
 7794  		}
 7795  	}
 7796  }
 7797  
/*
 * Compute the next absolute time (at least one minute from now) matching
 * the given crontab-style fields; any of mon/mday/hour/min may be -1 as a
 * wildcard. mon is compared against tm_mon, so it is 0-based like struct tm.
 */
time_t
cronemu(int mon, int mday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	// tm_isdst = -1 lets mktime() resolve DST; start at the next minute.
	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	// No match this year: restart the search from the start of next year.
	while (!cronemu_mon(&workingtm, mon, mday, hour, min)) {
		workingtm.tm_year++;
		workingtm.tm_mon = 0;
		workingtm.tm_mday = 1;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		mktime(&workingtm);
	}

	return mktime(&workingtm);
}
 7822  
/*
 * Compute the next absolute time falling on the given weekday at the given
 * hour/minute (any field may be -1 as a wildcard). As in crontab, both 0
 * and 7 denote Sunday.
 */
time_t
cronemu_wday(int wday, int hour, int min)
{
	struct tm workingtm;
	time_t now;

	now = time(NULL);
	workingtm = *localtime(&now);

	// tm_isdst = -1 lets mktime() resolve DST; start at the next minute.
	workingtm.tm_isdst = -1;
	workingtm.tm_sec = 0;
	workingtm.tm_min++;

	// crontab convention: 7 is an alias for Sunday (tm_wday == 0).
	if (wday == 7) {
		wday = 0;
	}

	// Walk forward a day at a time until both weekday and time match;
	// mktime() renormalizes tm_mday overflow and refreshes tm_wday.
	while (!(workingtm.tm_wday == wday && cronemu_hour(&workingtm, hour, min))) {
		workingtm.tm_mday++;
		workingtm.tm_hour = 0;
		workingtm.tm_min = 0;
		mktime(&workingtm);
	}

	return mktime(&workingtm);
}
 7849  
/*
 * Roll *wtm forward to the next time matching mon/mday/hour/min (any field
 * may be -1 as a wildcard). Returns false when no match exists within the
 * current year; the caller then advances to the next year and retries.
 */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		// Wildcard month: scan month by month on a scratch copy.
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_mday(&workingtm, mday, hour, min)) {
			workingtm.tm_mon++;
			workingtm.tm_mday = 1;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			// If mktime() normalizes tm_mon we carried into the next
			// year, so there is no match left this year.
			carrytest = workingtm.tm_mon;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mon) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	// The requested month already passed this year.
	if (mon < wtm->tm_mon) {
		return false;
	}

	// Jump to the very start of the requested (future) month.
	if (mon > wtm->tm_mon) {
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
 7885  
/*
 * Roll *wtm forward within its month to the next time matching
 * mday/hour/min (any field may be -1 as a wildcard). Returns false when
 * the match would fall outside the current month (carry detected via
 * mktime() normalization), letting the caller advance the month.
 */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		// Wildcard day: scan day by day on a scratch copy.
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_hour(&workingtm, hour, min)) {
			workingtm.tm_mday++;
			workingtm.tm_hour = 0;
			workingtm.tm_min = 0;
			// mktime() normalizing tm_mday means we carried into the
			// next month: no match left this month.
			carrytest = workingtm.tm_mday;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_mday) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	// The requested day already passed this month.
	if (mday < wtm->tm_mday) {
		return false;
	}

	// Jump to midnight of the requested (future) day.
	if (mday > wtm->tm_mday) {
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
 7919  
/*
 * Roll *wtm forward within its day to the next time matching hour/min
 * (either may be -1 as a wildcard). Returns false when the match would
 * fall on a later day (carry detected via mktime() normalization), letting
 * the caller advance the day.
 */
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		// Wildcard hour: scan hour by hour on a scratch copy.
		struct tm workingtm = *wtm;
		int carrytest;

		while (!cronemu_min(&workingtm, min)) {
			workingtm.tm_hour++;
			workingtm.tm_min = 0;
			// mktime() normalizing tm_hour means we carried into the
			// next day: no match left today.
			carrytest = workingtm.tm_hour;
			mktime(&workingtm);
			if (carrytest != workingtm.tm_hour) {
				return false;
			}
		}
		*wtm = workingtm;
		return true;
	}

	// The requested hour already passed today.
	if (hour < wtm->tm_hour) {
		return false;
	}

	// Jump to the top of the requested (future) hour.
	if (hour > wtm->tm_hour) {
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
 7951  
 7952  bool
 7953  cronemu_min(struct tm *wtm, int min)
 7954  {
 7955  	if (min == -1) {
 7956  		return true;
 7957  	}
 7958  
 7959  	if (min < wtm->tm_min) {
 7960  		return false;
 7961  	}
 7962  
 7963  	if (min > wtm->tm_min) {
 7964  		wtm->tm_min = min;
 7965  	}
 7966  
 7967  	return true;
 7968  }
 7969  
/*
 * MIG handler: bootstrap_create_server(). Creates an on-demand,
 * mach_init-style server job for server_cmd and returns its privileged
 * bootstrap port in *server_portp.
 */
kern_return_t
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t js;

	// NULL job here follows the file-wide MIG convention of reporting
	// BOOTSTRAP_NO_MEMORY for a missing caller context.
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	// Sandbox check against the server binary's path (argv[0]).
	const char **argv = (const char **)mach_cmd2argv(server_cmd);
	if (unlikely(argv == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
		free(argv);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	// mach_cmd2argv() returns one allocation, so a single free() suffices.
	free(argv);
#endif

	job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);

	if (pid1_magic) {
		// As PID 1, redirect non-root callers to their per-user launchd.
		if (ldc->euid || ldc->uid) {
			job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
			return VPROC_ERR_TRY_PER_USER;
		}
	} else {
		// Not PID 1: we can't switch UIDs, so warn and ignore the request.
		if (unlikely(server_uid != getuid())) {
			job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
					server_cmd, getuid(), server_uid);
		}
		server_uid = 0; // zero means "do nothing"
	}

	js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);

	if (unlikely(js == NULL)) {
		return BOOTSTRAP_NO_MEMORY;
	}

	*server_portp = js->j_port;
	return BOOTSTRAP_SUCCESS;
}
 8020  
/*
 * MIG handler: deliver a signal to another job identified by label.
 *
 * sig == VPROC_MAGIC_UNLOAD_SIGNAL means "remove the job" rather than a
 * real signal; if the target still has a running process, the MIG reply is
 * deferred (MIG_NO_REPLY) until removal completes.
 */
kern_return_t
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t otherj;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Callers must be root or our own UID — except, on embedded platforms,
	// a job carrying the embedded_god flag.
	if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
#if TARGET_OS_EMBEDDED	
		if (!j->embedded_god) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
#else
		return BOOTSTRAP_NOT_PRIVILEGED;
#endif
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// The embedded-god job may only signal jobs with a matching username.
	if (j->embedded_god) {
		if (j->username && otherj->username) {
			if (strcmp(j->username, otherj->username) != 0) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif	

	if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
		bool do_block = otherj->p;

		// Anonymous jobs weren't created through launchd; refuse to unload.
		if (otherj->anonymous) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}

		job_remove(otherj);

		if (do_block) {
			job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
			// this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first...
			(void)job_assumes(otherj, waiting4removal_new(otherj, srp));
			return MIG_NO_REPLY;
		} else {
			return 0;
		}
	} else if (otherj->p) {
		(void)job_assumes_zero_p(j, kill2(otherj->p, sig));
	}

	return 0;
}
 8086  
/*
 * MIG handler: accept forwarded log records from a per-user launchd and
 * pass them to launchd_log_forward() under the caller's credentials.
 * Only jobs flagged per_user may call this.
 */
kern_return_t
job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
{
	struct ldcred *ldc = runtime_get_caller_creds();

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}
#ifdef DARLING
	// Darling spells the test explicitly; the truth value is the same.
	if (!job_assumes(j, j->per_user != 0)) {
#else
	if (!job_assumes(j, j->per_user)) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	return launchd_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
}
 8105  
 8106  kern_return_t
 8107  job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
 8108  {
 8109  	struct ldcred *ldc = runtime_get_caller_creds();
 8110  
 8111  	if (!j) {
 8112  		return BOOTSTRAP_NO_MEMORY;
 8113  	}
 8114  
 8115  	if (unlikely(ldc->euid)) {
 8116  		return BOOTSTRAP_NOT_PRIVILEGED;
 8117  	}
 8118  
 8119  	return launchd_log_drain(srp, outval, outvalCnt);
 8120  }
 8121  
 8122  kern_return_t
 8123  job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
 8124  	vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval,
 8125  	mach_msg_type_number_t *outvalCnt)
 8126  {
 8127  	const char *action;
 8128  	launch_data_t input_obj = NULL, output_obj = NULL;
 8129  	size_t data_offset = 0;
 8130  	size_t packed_size;
 8131  	struct ldcred *ldc = runtime_get_caller_creds();
 8132  
 8133  	if (!j) {
 8134  		return BOOTSTRAP_NO_MEMORY;
 8135  	}
 8136  
 8137  	if (inkey && ldc->pid != j->p) {
 8138  		if (ldc->euid && ldc->euid != getuid()) {
 8139  			return BOOTSTRAP_NOT_PRIVILEGED;
 8140  		}
 8141   	}
 8142  
 8143  	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
 8144  		return 1;
 8145  	}
 8146  
 8147  	if (inkey && outkey) {
 8148  		action = "Swapping";
 8149  	} else if (inkey) {
 8150  		action = "Setting";
 8151  	} else {
 8152  		action = "Getting";
 8153  	}
 8154  
 8155  	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
 8156  
 8157  	*outvalCnt = 20 * 1024 * 1024;
 8158  	mig_allocate(outval, *outvalCnt);
 8159  	if (!job_assumes(j, *outval != 0)) {
 8160  		return 1;
 8161  	}
 8162  
 8163  	/* Note to future maintainers: launch_data_unpack() does NOT return a heap
 8164  	 * object. The data is decoded in-place. So do not call launch_data_free()
 8165  	 * on input_obj.
 8166  	 */
 8167  	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
 8168  	if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
 8169  		goto out_bad;
 8170  	}
 8171  
 8172  	char *store = NULL;
 8173  	switch (outkey) {
 8174  	case VPROC_GSK_ENVIRONMENT:
 8175  		if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
 8176  			goto out_bad;
 8177  		}
 8178  		jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
 8179  		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
 8180  		if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
 8181  			goto out_bad;
 8182  		}
 8183  		launch_data_free(output_obj);
 8184  		break;
 8185  	case VPROC_GSK_ALLJOBS:
 8186  		if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
 8187  			goto out_bad;
 8188  		}
 8189  		ipc_revoke_fds(output_obj);
 8190  		runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
 8191  		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
 8192  		if (!job_assumes(j, packed_size != 0)) {
 8193  			goto out_bad;
 8194  		}
 8195  		launch_data_free(output_obj);
 8196  		break;
 8197  	case VPROC_GSK_MGR_NAME:
 8198  		if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
 8199  			goto out_bad;
 8200  		}
 8201  		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
 8202  		if (!job_assumes(j, packed_size != 0)) {
 8203  			goto out_bad;
 8204  		}
 8205  
 8206  		launch_data_free(output_obj);
 8207  		break;
 8208  	case VPROC_GSK_JOB_OVERRIDES_DB:
 8209  		store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_DB, "overrides.plist");
 8210  		if (!store || !job_assumes(j, (output_obj = launch_data_new_string(store)) != NULL)) {
 8211  			free(store);
 8212  			goto out_bad;
 8213  		}
 8214  
 8215  		free(store);
 8216  		packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
 8217  		if (!job_assumes(j, packed_size != 0)) {
 8218  			goto out_bad;
 8219  		}
 8220  
 8221  		launch_data_free(output_obj);
 8222  		break;
 8223  	case VPROC_GSK_ZERO:
 8224  		mig_deallocate(*outval, *outvalCnt);
 8225  		*outval = 0;
 8226  		*outvalCnt = 0;
 8227  		break;
 8228  	default:
 8229  		goto out_bad;
 8230  	}
 8231  
 8232  	mig_deallocate(inval, invalCnt);
 8233  	return 0;
 8234  
 8235  out_bad:
 8236  	mig_deallocate(inval, invalCnt);
 8237  	if (*outval) {
 8238  		mig_deallocate(*outval, *outvalCnt);
 8239  	}
 8240  	if (output_obj) {
 8241  		launch_data_free(output_obj);
 8242  	}
 8243  
 8244  	return 1;
 8245  }
 8246  
 8247  kern_return_t
 8248  job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
 8249  {
 8250  	const char *action;
 8251  	kern_return_t kr = 0;
 8252  	struct ldcred *ldc = runtime_get_caller_creds();
 8253  	int oldmask;
 8254  
 8255  	if (!j) {
 8256  		return BOOTSTRAP_NO_MEMORY;
 8257  	}
 8258  
 8259  	if (inkey && ldc->pid != j->p) {
 8260  		if (ldc->euid && ldc->euid != getuid()) {
 8261  			return BOOTSTRAP_NOT_PRIVILEGED;
 8262  		}
 8263   	}
 8264  
 8265  	if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
 8266  		return 1;
 8267  	}
 8268  
 8269  	if (inkey && outkey) {
 8270  		action = "Swapping";
 8271  	} else if (inkey) {
 8272  		action = "Setting";
 8273  	} else {
 8274  		action = "Getting";
 8275  	}
 8276  
 8277  	job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
 8278  
 8279  	switch (outkey) {
 8280  	case VPROC_GSK_ABANDON_PROCESS_GROUP:
 8281  		*outval = j->abandon_pg;
 8282  		break;
 8283  	case VPROC_GSK_LAST_EXIT_STATUS:
 8284  		*outval = j->last_exit_status;
 8285  		break;
 8286  	case VPROC_GSK_MGR_UID:
 8287  		*outval = getuid();
 8288  		break;
 8289  	case VPROC_GSK_MGR_PID:
 8290  		*outval = getpid();
 8291  		break;
 8292  	case VPROC_GSK_IS_MANAGED:
 8293  		*outval = j->anonymous ? 0 : 1;
 8294  		break;
 8295  	case VPROC_GSK_BASIC_KEEPALIVE:
 8296  		*outval = !j->ondemand;
 8297  		break;
 8298  	case VPROC_GSK_START_INTERVAL:
 8299  		*outval = j->start_interval;
 8300  		break;
 8301  	case VPROC_GSK_IDLE_TIMEOUT:
 8302  		*outval = j->timeout;
 8303  		break;
 8304  	case VPROC_GSK_EXIT_TIMEOUT:
 8305  		*outval = j->exit_timeout;
 8306  		break;
 8307  	case VPROC_GSK_GLOBAL_LOG_MASK:
 8308  		oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
 8309  		*outval = oldmask;
 8310  		runtime_setlogmask(oldmask);
 8311  		break;
 8312  	case VPROC_GSK_GLOBAL_UMASK:
 8313  		oldmask = umask(0);
 8314  		*outval = oldmask;
 8315  		umask(oldmask);
 8316  		break;
 8317  	case VPROC_GSK_TRANSACTIONS_ENABLED:
 8318  		job_log(j, LOG_DEBUG, "Reading EnableTransactions value.");
 8319  		*outval = j->enable_transactions;
 8320  		break;
 8321  	case VPROC_GSK_WAITFORDEBUGGER:
 8322  		*outval = j->wait4debugger;
 8323  		break;
 8324  	case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
 8325  		*outval = j->embedded_god;
 8326  		break;
 8327  	case VPROC_GSK_ZERO:
 8328  		*outval = 0;
 8329  		break;
 8330  	default:
 8331  		kr = 1;
 8332  		break;
 8333  	}
 8334  
 8335  	switch (inkey) {
 8336  	case VPROC_GSK_ABANDON_PROCESS_GROUP:
 8337  		j->abandon_pg = (bool)inval;
 8338  		break;
 8339  	case VPROC_GSK_GLOBAL_ON_DEMAND:
 8340  		job_log(j, LOG_DEBUG, "Job has set global on-demand mode to: %s", inval ? "true" : "false");
 8341  		kr = job_set_global_on_demand(j, inval);
 8342  		break;
 8343  	case VPROC_GSK_BASIC_KEEPALIVE:
 8344  		j->ondemand = !inval;
 8345  		break;
 8346  	case VPROC_GSK_START_INTERVAL:
 8347  		if (inval > UINT32_MAX || inval < 0) {
 8348  			kr = 1;
 8349  		} else if (inval) {
 8350  			if (j->start_interval == 0) {
 8351  				runtime_add_weak_ref();
 8352  			}
 8353  			j->start_interval = (typeof(j->start_interval)) inval;
 8354  			(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
 8355  		} else if (j->start_interval) {
 8356  			(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
 8357  			if (j->start_interval != 0) {
 8358  				runtime_del_weak_ref();
 8359  			}
 8360  			j->start_interval = 0;
 8361  		}
 8362  		break;
 8363  	case VPROC_GSK_IDLE_TIMEOUT:
 8364  		if (inval < 0 || inval > UINT32_MAX) {
 8365  			kr = 1;
 8366  		} else {
 8367  			j->timeout = (typeof(j->timeout)) inval;
 8368  		}
 8369  		break;
 8370  	case VPROC_GSK_EXIT_TIMEOUT:
 8371  		if (inval < 0 || inval > UINT32_MAX) {
 8372  			kr = 1;
 8373  		} else {
 8374  			j->exit_timeout = (typeof(j->exit_timeout)) inval;
 8375  		}
 8376  		break;
 8377  	case VPROC_GSK_GLOBAL_LOG_MASK:
 8378  		if (inval < 0 || inval > UINT32_MAX) {
 8379  			kr = 1;
 8380  		} else {
 8381  			runtime_setlogmask((int) inval);
 8382  		}
 8383  		break;
 8384  	case VPROC_GSK_GLOBAL_UMASK:
 8385  		__OS_COMPILETIME_ASSERT__(sizeof (mode_t) == 2);
 8386  		if (inval < 0 || inval > UINT16_MAX) {
 8387  			kr = 1;
 8388  		} else {
 8389  #if HAVE_SANDBOX
 8390  			if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
 8391  				kr = 1;
 8392  			} else {
 8393  				umask((mode_t) inval);
 8394  			}
 8395  #endif
 8396  		}
 8397  		break;
 8398  	case VPROC_GSK_TRANSACTIONS_ENABLED:
 8399  		/* No-op. */
 8400  		break;
 8401  	case VPROC_GSK_WEIRD_BOOTSTRAP:
 8402  #ifdef DARLING
 8403  		if (job_assumes(j, j->weird_bootstrap != 0)) {
 8404  #else
 8405  		if (job_assumes(j, j->weird_bootstrap)) {
 8406  #endif
 8407  			job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
 8408  
 8409  			mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_job_subsystem);
 8410  
 8411  			if (job_mig_job_subsystem.maxsize > mxmsgsz) {
 8412  				mxmsgsz = job_mig_job_subsystem.maxsize;
 8413  			}
 8414  
 8415  			(void)job_assumes_zero(j, runtime_add_mport(j->mgr->jm_port, job_server));
 8416  			j->weird_bootstrap = false;
 8417  		}
 8418  		break;
 8419  	case VPROC_GSK_WAITFORDEBUGGER:
 8420  		j->wait4debugger_oneshot = inval;
 8421  		break;
 8422  	case VPROC_GSK_PERUSER_SUSPEND:
 8423  		if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
 8424  			mach_port_t junk = MACH_PORT_NULL;
 8425  			job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
 8426  			if (job_assumes(j, jpu != NULL)) {
 8427  				struct suspended_peruser *spi = NULL;
 8428  				LIST_FOREACH(spi, &j->suspended_perusers, sle) {
 8429  					if ((int64_t)(spi->j->mach_uid) == inval) {
 8430  						job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
 8431  						break;
 8432  					}
 8433  				}
 8434  
 8435  				if (spi == NULL) {
 8436  					job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
 8437  					spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
 8438  					if (job_assumes(j, spi != NULL)) {
 8439  						/* Stop listening for events.
 8440  						 *
 8441  						 * See <rdar://problem/9014146>.
 8442  						 */
 8443  						if (jpu->peruser_suspend_count == 0) {
 8444  							job_ignore(jpu);
 8445  						}
 8446  
 8447  						spi->j = jpu;
 8448  						spi->j->peruser_suspend_count++;
 8449  						LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
 8450  						job_stop(spi->j);
 8451  						*outval = jpu->p;
 8452  					} else {
 8453  						kr = BOOTSTRAP_NO_MEMORY;
 8454  					}
 8455  				}
 8456  			}
 8457  		} else {
 8458  			kr = 1;
 8459  		}
 8460  		break;
 8461  	case VPROC_GSK_PERUSER_RESUME:
 8462  		if (job_assumes(j, pid1_magic == true)) {
 8463  			struct suspended_peruser *spi = NULL, *spt = NULL;
 8464  			LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
 8465  				if ((int64_t)(spi->j->mach_uid) == inval) {
 8466  					spi->j->peruser_suspend_count--;
 8467  					LIST_REMOVE(spi, sle);
 8468  					job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
 8469  					break;
 8470  				}
 8471  			}
 8472  
 8473  			if (!job_assumes(j, spi != NULL)) {
 8474  				job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
 8475  				kr = BOOTSTRAP_NOT_PRIVILEGED;
 8476  			} else if (spi->j->peruser_suspend_count == 0) {
 8477  				job_watch(spi->j);
 8478  				job_dispatch(spi->j, false);
 8479  				free(spi);
 8480  			}
 8481  		} else {
 8482  			kr = 1;
 8483  		}
 8484  		break;
 8485  	case VPROC_GSK_ZERO:
 8486  		break;
 8487  	default:
 8488  		kr = 1;
 8489  		break;
 8490  	}
 8491  
 8492  	return kr;
 8493  }
 8494  
/* MIG server routine: called by a freshly-forked child (via vproc) so launchd
 * can install its exception port and the registered host special ports into
 * the child's task, and hand back the audit session port the child should
 * adopt. Consumes the send right on child_task on success.
 */
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	struct machservice *ms;
	job_setup_exception_port(j, child_task);
	// Copy each registered special port into the child's task.
	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			// The TASK_ACCESS_PORT funny business is to workaround 5325399.
			continue;
		}

		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
		if (errno) {
			if (errno == MACH_SEND_INVALID_DEST) {
				// The task port is dead; no point trying the remaining ports.
				job_log(j, LOG_WARNING, "Task died before special ports could be set.");
				break;
			}

			// Anonymous jobs get a quieter log level for expected failures.
			int desired_log_level = LOG_ERR;
			if (j->anonymous) {
				// 5338127

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* MIG will not zero-initialize this pointer, so we must always do so.
	 *
	 * <rdar://problem/8562593>.
	 */
	*asport = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
	if (!j->anonymous) {
		/* XPC services will spawn into the root security session by default.
		 * xpcproxy will switch them away if needed.
		 */
		if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
			job_log(j, LOG_DEBUG, "Returning session port: 0x%x", j->asport);
			*asport = j->asport;
		}
	}
#endif
	(void)job_assumes_zero(j, launchd_mport_deallocate(child_task));

	return 0;
}
 8554  
/* MIG server routine: returns an out-of-line array containing the receive-side
 * ports of every "upfront" Mach service declared by this job. The array is
 * allocated with mig_allocate(); MIG takes ownership of it on return.
 */
kern_return_t
job_mig_get_listener_port_rights(job_t j, mach_port_array_t *sports, mach_msg_type_number_t *sports_cnt)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// First pass: count qualifying services so we know how much to allocate.
	size_t cnt = 0;
	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
#ifdef DARLING
		if (msi->upfront && job_assumes(j, msi->recv != 0)) {
#else
		if (msi->upfront && job_assumes(j, msi->recv)) {
#endif
			cnt++;
		}
	}

	if (cnt == 0) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	mach_port_array_t sports2 = NULL;
	mig_allocate((vm_address_t *)&sports2, cnt * sizeof(sports2[0]));
	if (!sports2) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Second pass: copy the ports using the same predicate as the count.
	size_t i = 0;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (msi->upfront && msi->recv) {
			sports2[i] = msi->port;
			i++;
		}
	}

	*sports = sports2;
	*sports_cnt = cnt;

	return KERN_SUCCESS;
}
 8597  
 8598  kern_return_t
 8599  job_mig_register_gui_session(job_t j, mach_port_t asport)
 8600  {
 8601  	if (!j->per_user) {
 8602  		return BOOTSTRAP_NOT_PRIVILEGED;
 8603  	}
 8604  
 8605  	jobmgr_t jm = jobmgr_find_xpc_per_user_domain(root_jobmgr, j->mach_uid);
 8606  	if (!jm) {
 8607  		return BOOTSTRAP_UNKNOWN_SERVICE;
 8608  	}
 8609  
 8610  	if (jm->req_gui_asport) {
 8611  		// This job manager persists, so we need to allow the per-user launchd
 8612  		// to update the GUI session as it comes and goes.
 8613  		jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_gui_asport));
 8614  	}
 8615  
 8616  	jm->req_gui_asport = asport;
 8617  	return KERN_SUCCESS;
 8618  }
 8619  
/* MIG server routine: initiates a system reboot/shutdown on behalf of a
 * privileged caller. Records the caller's process ancestry (for the log) by
 * walking parent PIDs via proc_pidinfo(), stashes the reboot flags on the
 * root job manager, and kicks off launchd_shutdown(). PID-1 only.
 */
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct proc_bsdshortinfo proc;
	struct ldcred *ldc = runtime_get_caller_creds();
	pid_t pid_to_log;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!pid1_magic)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// On embedded, a job marked embedded_god may reboot even without euid 0.
#if !TARGET_OS_EMBEDDED
	if (unlikely(ldc->euid)) {
#else
	if (unlikely(ldc->euid) && !j->embedded_god) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Walk the caller's ancestry (pid -> ppid) building a log string.
	for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
		size_t who_offset;
		if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			return 1;
		}

		// Guard against a process that claims to be its own parent (infinite loop).
		if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
			job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
			snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
			break;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
				" %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;
	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
	launchd_shutdown();

	return 0;
}
 8670  
 8671  kern_return_t
 8672  job_mig_getsocket(job_t j, name_t spr)
 8673  {
 8674  	if (!j) {
 8675  		return BOOTSTRAP_NO_MEMORY;
 8676  	}
 8677  
 8678  	if (j->deny_job_creation) {
 8679  		return BOOTSTRAP_NOT_PRIVILEGED;
 8680  	}
 8681  
 8682  #if HAVE_SANDBOX
 8683  	struct ldcred *ldc = runtime_get_caller_creds(); 
 8684  	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
 8685  		return BOOTSTRAP_NOT_PRIVILEGED;
 8686  	}
 8687  #endif
 8688  
 8689  	ipc_server_init();
 8690  
 8691  	if (unlikely(!sockpath)) {
 8692  		return BOOTSTRAP_NO_MEMORY;
 8693  	}
 8694  
 8695  	strncpy(spr, sockpath, sizeof(name_t));
 8696  
 8697  	return BOOTSTRAP_SUCCESS;
 8698  }
 8699  
 8700  kern_return_t
 8701  job_mig_log(job_t j, int pri, int err, logmsg_t msg)
 8702  {
 8703  	if (!j) {
 8704  		return BOOTSTRAP_NO_MEMORY;
 8705  	}
 8706  
 8707  	if ((errno = err)) {
 8708  		job_log_error(j, pri, "%s", msg);
 8709  	} else {
 8710  		job_log(j, pri, "%s", msg);
 8711  	}
 8712  
 8713  	return 0;
 8714  }
 8715  
 8716  void
 8717  job_setup_per_user_directory(job_t j, uid_t uid, const char *path)
 8718  {
 8719  	struct stat sb;
 8720  
 8721  	bool created = false;
 8722  	int r = stat(path, &sb);
 8723  	if ((r == -1 && errno == ENOENT) || (r == 0 && !S_ISDIR(sb.st_mode))) {
 8724  		if (r == 0) {
 8725  			job_log(j, LOG_NOTICE, "File at location of per-user launchd directory is not a directory. Moving aside: %s", path);
 8726  
 8727  			char old[PATH_MAX];
 8728  			snprintf(old, sizeof(old), "%s.movedaside", path);
 8729  			(void)job_assumes_zero_p(j, rename(path, old));
 8730  		}
 8731  
 8732  		(void)job_assumes_zero_p(j, mkdir(path, S_IRWXU));
 8733  		(void)job_assumes_zero_p(j, chown(path, uid, 0));
 8734  		created = true;
 8735  	}
 8736  
 8737  	if (!created) {
 8738  		if (sb.st_uid != uid) {
 8739  			job_log(j, LOG_NOTICE, "Per-user launchd directory has improper user ownership. Repairing: %s", path);
 8740  			(void)job_assumes_zero_p(j, chown(path, uid, 0));
 8741  		}
 8742  		if (sb.st_gid != 0) {
 8743  			job_log(j, LOG_NOTICE, "Per-user launchd directory has improper group ownership. Repairing: %s", path);
 8744  			(void)job_assumes_zero_p(j, chown(path, uid, 0));
 8745  		}
 8746  		if (sb.st_mode != (S_IRWXU | S_IFDIR)) {
 8747  			job_log(j, LOG_NOTICE, "Per-user launchd directory has improper mode. Repairing: %s", path);
 8748  			(void)job_assumes_zero_p(j, chmod(path, S_IRWXU));
 8749  		}
 8750  	}
 8751  }
 8752  
 8753  void
 8754  job_setup_per_user_directories(job_t j, uid_t uid, const char *label)
 8755  {
 8756  	char path[PATH_MAX];
 8757  
 8758  	(void)snprintf(path, sizeof(path), LAUNCHD_DB_PREFIX "/%s", label);
 8759  	job_setup_per_user_directory(j, uid, path);
 8760  
 8761  	(void)snprintf(path, sizeof(path), LAUNCHD_LOG_PREFIX "/%s", label);
 8762  	job_setup_per_user_directory(j, uid, path);
 8763  }
 8764  
 8765  job_t
 8766  jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
 8767  {
 8768  	job_t ji = NULL;
 8769  	LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
 8770  		if (!ji->per_user) {
 8771  			continue;
 8772  		}
 8773  		if (ji->mach_uid != which_user) {
 8774  			continue;
 8775  		}
 8776  		if (SLIST_EMPTY(&ji->machservices)) {
 8777  			continue;
 8778  		}
 8779  		if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
 8780  			continue;
 8781  		}
 8782  		break;
 8783  	}
 8784  
 8785  	if (unlikely(ji == NULL)) {
 8786  		struct machservice *ms;
 8787  		char lbuf[1024];
 8788  
 8789  		job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
 8790  
 8791  		sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
 8792  
 8793  		ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
 8794  
 8795  		if (ji != NULL) {
 8796  			auditinfo_addr_t auinfo = {
 8797  				.ai_termid = { 
 8798  					.at_type = AU_IPv4
 8799  				},
 8800  				.ai_auid = which_user,
 8801  				.ai_asid = AU_ASSIGN_ASID,
 8802  			};
 8803  
 8804  			if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
 8805  				job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
 8806  				(void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
 8807  
 8808  				/* Kinda lame that we have to do this, but we can't create an
 8809  				 * audit session without joining it.
 8810  				 */
 8811  				(void)job_assumes(ji, audit_session_join(launchd_audit_port));
 8812  				ji->asid = auinfo.ai_asid;
 8813  			} else {
 8814  				job_log(ji, LOG_WARNING, "Could not set audit session!");
 8815  				job_remove(ji);
 8816  				return NULL;
 8817  			}
 8818  
 8819  			ji->mach_uid = which_user;
 8820  			ji->per_user = true;
 8821  			ji->enable_transactions = true;
 8822  			job_setup_per_user_directories(ji, which_user, lbuf);
 8823  
 8824  			if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
 8825  				job_remove(ji);
 8826  				ji = NULL;
 8827  			} else {
 8828  				ms->upfront = true;
 8829  				ms->per_user_hack = true;
 8830  				ms->hide = true;
 8831  
 8832  				ji = job_dispatch(ji, false);
 8833  			}
 8834  		}
 8835  	} else {
 8836  		*mp = machservice_port(SLIST_FIRST(&ji->machservices));
 8837  		job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
 8838  	}
 8839  
 8840  	return ji;
 8841  }
 8842  
/* MIG server routine: hands back the bootstrap port of the per-user launchd
 * for the requested UID, creating the per-user launchd lazily via
 * jobmgr_lookup_per_user_context_internal(). Non-root callers are redirected
 * to their own UID regardless of which_user. PID-1 only; unavailable during
 * OS installation and on embedded platforms.
 */
kern_return_t
job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jpu;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (launchd_osinstaller) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// There is no need for per-user launchd's on embedded.
	job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
	return BOOTSTRAP_UNKNOWN_SERVICE;
#endif

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);

	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Non-root callers may only look up their own per-user launchd.
	if (ldc->euid || ldc->uid) {
		which_user = ldc->euid ?: ldc->uid;
	}

	*up_cont = MACH_PORT_NULL;

	// The helper fills in *up_cont; the returned job itself is not needed here.
	jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);

	return 0;
}
 8886  
/* MIG server routine backing bootstrap_check_in(): hands the job the receive
 * right for one of its declared Mach services. Supports per-PID services,
 * strict check-in (service must pre-exist and belong to the caller), dedicated
 * job instances, and a legacy path that creates the service on the fly for
 * jobs that never declared it.
 */
kern_return_t
job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
{
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
	struct ldcred *ldc = runtime_get_caller_creds();
	struct machservice *ms = NULL;
	job_t jo;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->dedicated_instance) {
		// Dedicated instances only see their own services; also report the
		// instance UUID back to the caller.
		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
				uuid_copy(instance_id, j->instance_id);
				ms = msi;
				break;
			}
		}
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
	}

	if (strict) {
		// Strict check-in: the service must already exist, belong to this
		// job, and not be active.
		if (likely(ms != NULL)) {
			if (ms->job != j) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			} else if (ms->isActive) {
				return BOOTSTRAP_SERVICE_ACTIVE;
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else if (ms == NULL) {
		// Legacy path: create the service on demand for the caller.
		if (job_assumes(j, !j->dedicated_instance)) {
			*serviceportp = MACH_PORT_NULL;

#if HAVE_SANDBOX
			if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
#endif
			if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
				return BOOTSTRAP_NO_MEMORY;
			}

			// Treat this like a legacy job.
			if (!j->legacy_mach_job) {
				ms->isActive = true;
				ms->recv = false;
			}

			if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
				job_log(j, LOG_APPLEONLY, "Please add the following service to the configuration file for this job: %s", servicename);
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else {
		// Non-strict check-in of an existing service: it must be ours and idle.
		if (unlikely((jo = machservice_job(ms)) != j)) {
			static pid_t last_warned_pid;

			// Rate-limit the hijack warning to one per offending PID.
			if (last_warned_pid != ldc->pid) {
				job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
				last_warned_pid = ldc->pid;
			}

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (unlikely(machservice_active(ms))) {
			job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
	}

	job_checkin(j);
	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
 8973  
/* MIG server routine backing bootstrap_register(): registers (or replaces)
 * a Mach service under servicename pointing at the supplied send right.
 * A MACH_PORT_NULL serviceport deletes the existing registration. The
 * deprecated non-per-PID form is logged for legacy (non-LS) jobs.
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!per_pid_service && !j->legacy_LS_job) {
		job_log(j, LOG_APPLEONLY, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
	}

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
		if (pid1_magic) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);

	if (unlikely(ms)) {
		// Re-registration: only allowed by the owning job, only when idle,
		// and never for services whose receive right launchd holds.
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		if (ms->recv && (serviceport != MACH_PORT_NULL)) {
			job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		job_checkin(j);
		machservice_delete(j, ms, false);
	}

	// A non-null port replaces the service with the caller-provided right.
	if (likely(serviceport != MACH_PORT_NULL)) {
		if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}


	return BOOTSTRAP_SUCCESS;
}
 9042  
/* MIG server routine backing bootstrap_look_up(): resolves servicename to a
 * send right. Handles per-PID lookups, XPC-domain-local lookups, strict and
 * privileged lookup semantics, on-demand creation of multi-instance subjobs,
 * forwarding to the inherited bootstrap, and the legacy loginwindow-session
 * redirect to the per-user launchd.
 */
kern_return_t
job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
{
	struct machservice *ms = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr;
	bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
	bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
	bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	bool xpc_req = (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN);

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	// Non-root anonymous callers in the root bootstrap belong in their
	// per-user context.
	if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

#if HAVE_SANDBOX
	/* We don't do sandbox checking for XPC domains because, by definition, all
	 * the services within your domain should be accessible to you.
	 */
	if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	if (per_pid_lookup) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		if (xpc_req) {
			// Requests from XPC domains stay local.
			ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
		} else {
			/* A strict lookup which is privileged won't even bother trying to
			 * find a service if we're not hosting the root Mach bootstrap.
			 */
			if (strict_lookup && privileged) {
				if (inherited_bootstrap_port == MACH_PORT_NULL) {
					ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
				}
			} else {
				ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
			}
		}
	}

	if (likely(ms)) {
		// Chase aliases to the real service.
		ms = ms->alias ? ms->alias : ms;
		if (unlikely(specific_instance && ms->job->multiple_instances)) {
			// Multi-instance job: find (or spawn) the subjob matching
			// instance_id, then re-resolve the service within it.
			job_t ji = NULL;
			job_t instance = NULL;
			LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
				if (uuid_compare(instance_id, ji->instance_id) == 0) {
					instance = ji;
					break;
				}
			}

			if (unlikely(instance == NULL)) {
				job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
				instance = job_new_subjob(ms->job, instance_id);
				if (job_assumes(j, instance != NULL)) {
					/* Disable this support for now. We only support having 
					 * multi-instance jobs within private XPC domains.
					 */
#if 0
					/* If the job is multi-instance, in a singleton XPC domain
					 * and the request is not coming from within that singleton
					 * domain, we need to alias the new job into the requesting
					 * domain.
					 */
					if (!j->mgr->xpc_singleton && xpc_req) {
						(void)job_assumes(instance, job_new_alias(j->mgr, instance));
					}
#endif
					job_dispatch(instance, false);
				}
			}

			ms = NULL;
			if (job_assumes(j, instance != NULL)) {
				struct machservice *msi = NULL;
				SLIST_FOREACH(msi, &instance->machservices, sle) {
					/* sizeof(servicename) will return the size of a pointer,
					 * even though it's an array type, because when passing
					 * arrays as parameters in C, they implicitly degrade to
					 * pointers.
					 */
					if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
						ms = msi;
						break;
					}
				}
			}
		} else {
			// Hidden-and-inactive or per-user-hack services are not visible
			// through ordinary lookups.
			if (machservice_hidden(ms) && !machservice_active(ms)) {
				ms = NULL;
			} else if (unlikely(ms->per_user_hack)) {
				ms = NULL;
			}
		}
	}

	if (likely(ms)) {
		(void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
		*serviceportp = machservice_port(ms);

		kr = BOOTSTRAP_SUCCESS;
	} else if (strict_lookup && !privileged) {
		/* Hack: We need to simulate XPC's desire not to establish a hierarchy.
		 * So if XPC is doing the lookup, and it's not a privileged lookup, we
		 * won't forward. But if it is a privileged lookup, then we must
		 * forward.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	} else if (inherited_bootstrap_port != MACH_PORT_NULL) {
		// Requests from within an XPC domain don't get forwarded.
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		/* Clients potentially check the audit token of the reply to verify that
		 * the returned send right is trustworthy.
		 */
		(void)job_assumes_zero(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags));
		return MIG_NO_REPLY;
	} else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/* 5240036 Should start background session when a lookup of CCacheServer
		 * occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt
		 * to guess what it is up to. If we find a EUID that isn't root, we
		 * force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
 9190  
 9191  kern_return_t
 9192  job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
 9193  {
 9194  	if (!j) {
 9195  		return BOOTSTRAP_NO_MEMORY;
 9196  	}
 9197  
 9198  	job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
 9199  	jobmgr_t jm = j->mgr;
 9200  
 9201  	if (jobmgr_parent(jm)) {
 9202  		*parentport = jobmgr_parent(jm)->jm_port;
 9203  	} else if (MACH_PORT_NULL == inherited_bootstrap_port) {
 9204  		*parentport = jm->jm_port;
 9205  	} else {
 9206  		(void)job_assumes_zero(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp));
 9207  		// The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now
 9208  		return MIG_NO_REPLY;
 9209  	}
 9210  	return BOOTSTRAP_SUCCESS;
 9211  }
 9212  
 9213  kern_return_t
 9214  job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
 9215  {
 9216  	if (!j) {
 9217  		return BOOTSTRAP_NO_MEMORY;
 9218  	}
 9219  
 9220  	if (inherited_bootstrap_port == MACH_PORT_NULL) {
 9221  		*rootbsp = root_jobmgr->jm_port;
 9222  		(void)job_assumes_zero(j, launchd_mport_make_send(root_jobmgr->jm_port));
 9223  	} else {
 9224  		*rootbsp = inherited_bootstrap_port;
 9225  		(void)job_assumes_zero(j, launchd_mport_copy_send(inherited_bootstrap_port));
 9226  	}
 9227  
 9228  	return BOOTSTRAP_SUCCESS;
 9229  }
 9230  
/* MIG server routine backing bootstrap_info(): returns three parallel
 * out-of-line arrays (service names, owning job labels, active status) for
 * every non-per-PID Mach service visible to the caller's namespace. Arrays
 * are mig_allocate()d; all three are freed on any allocation failure.
 */
kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp,
	unsigned int *servicenames_cnt, name_array_t *servicejobsp,
	unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp,
	unsigned int *serviceactives_cnt, uint64_t flags)
{
	name_array_t service_names = NULL;
	name_array_t service_jobs = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	jobmgr_t jm;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

#if TARGET_OS_EMBEDDED
	struct ldcred *ldc = runtime_get_caller_creds();
	if (ldc->euid) {
		return EPERM;
	}
#endif // TARGET_OS_EMBEDDED

	// Pick which namespace to enumerate: the local one for explicit subsets
	// or forced-local requests, otherwise the root (flat) namespace.
	if (launchd_flat_mach_namespace) {
		if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
			jm = j->mgr;
		} else {
			jm = root_jobmgr;
		}
	} else {
		jm = j->mgr;
	}

	// First pass: count the non-per-PID services so we can size the arrays.
	unsigned int i = 0;
	struct machservice *msi = NULL;
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			cnt += !msi->per_pid ? 1 : 0;
		}
	}

	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!job_assumes(j, service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
	if (!job_assumes(j, service_jobs != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!job_assumes(j, service_actives != NULL)) {
		goto out_bad;
	}

	// Second pass: fill the three parallel arrays.
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			if (!msi->per_pid) {
				strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
				// Report the aliased (real) service's owner and status.
				msi = msi->alias ? msi->alias : msi;
				if (msi->job->mgr->shortdesc) {
					strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
				} else {
					strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
				}
				service_actives[cnt2] = machservice_status(msi);
				cnt2++;
			}
		}
	}

	(void)job_assumes(j, cnt == cnt2);

out:
	*servicenamesp = service_names;
	*servicejobsp = service_jobs;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_jobs) {
		mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
 9330  
/* MIG server routine: enumerate the children of the caller's bootstrap.
 * Fills three parallel arrays — a send right (or MACH_PORT_NULL), a name,
 * and a property bitmask per child. Children are the submanagers of the
 * caller's job manager plus, in PID 1, the per-user launchds.
 * Root-only; see the security note below.
 */
kern_return_t
job_mig_lookup_children(job_t j, mach_port_array_t *child_ports,
	mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names,
	mach_msg_type_number_t *child_names_cnt,
	bootstrap_property_array_t *child_properties,
	mach_msg_type_number_t *child_properties_cnt)
{
	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	// MIG intran can hand us a NULL job if port translation failed.
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	struct ldcred *ldc = runtime_get_caller_creds();

	/* Only allow root processes to look up children, even if we're in the per-user launchd.
	 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
	 * in a non-flat namespace.
	 */
	if (ldc->euid != 0) {
		job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	unsigned int cnt = 0;

	// First pass: count children so the reply arrays can be sized.
	jobmgr_t jmr = j->mgr;
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		cnt++;
	}

	// Find our per-user launchds if we're PID 1.
	job_t ji = NULL;
	if (pid1_magic) {
		LIST_FOREACH(ji, &jmr->jobs, sle) {
			cnt += ji->per_user ? 1 : 0;
		}
	}

	if (cnt == 0) {
		return BOOTSTRAP_NO_CHILDREN;
	}

	mach_port_array_t _child_ports = NULL;
	name_array_t _child_names = NULL;
	bootstrap_property_array_t _child_properties = NULL;

	/* mig_allocate() memory is zero-filled; on success, ownership of all three
	 * buffers transfers to the MIG reply (out-of-line descriptors).
	 */
	mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
	if (!job_assumes(j, _child_ports != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
	if (!job_assumes(j, _child_names != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
	if (!job_assumes(j, _child_properties != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	// Second pass: fill the arrays. Submanagers first...
	unsigned int cnt2 = 0;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		// Make a new send right for the child's bootstrap port; NULL on failure.
		if (jobmgr_assumes_zero(jmi, launchd_mport_make_send(jmi->jm_port)) == KERN_SUCCESS) {
			_child_ports[cnt2] = jmi->jm_port;
		} else {
			_child_ports[cnt2] = MACH_PORT_NULL;
		}

		strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
		_child_properties[cnt2] = jmi->properties;

		cnt2++;
	}

	// ...then, in PID 1, the per-user launchds.
	if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
		if (ji->per_user) {
			// A per-user launchd's bootstrap port is its first (hack) machservice.
			if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
				mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));

				if (job_assumes_zero(ji, launchd_mport_copy_send(port)) == KERN_SUCCESS) {
					_child_ports[cnt2] = port;
				} else {
					_child_ports[cnt2] = MACH_PORT_NULL;
				}
			} else {
				_child_ports[cnt2] = MACH_PORT_NULL;
			}

			strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
			// OR is safe: mig_allocate() zero-fills, so the slot starts at 0.
			_child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;

			cnt2++;
		}
	}

	*child_names_cnt = cnt;
	*child_ports_cnt = cnt;
	*child_properties_cnt = cnt;

	*child_names = _child_names;
	*child_ports = _child_ports;
	*child_properties = _child_properties;

	unsigned int i = 0;
	for (i = 0; i < cnt; i++) {
		job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
	}

	return BOOTSTRAP_SUCCESS;
out_bad:
	if (_child_ports) {
		mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
	}

	if (_child_names) {
		mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
	}

	if (_child_properties) {
		mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
	}

	return kr;
}
 9460  
 9461  kern_return_t
 9462  job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
 9463  {
 9464  	struct ldcred *ldc = runtime_get_caller_creds();
 9465  	if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
 9466  		return BOOTSTRAP_NOT_PRIVILEGED;
 9467  	}
 9468  
 9469  	/* This is so loginwindow doesn't try to quit GUI apps that have been launched
 9470  	 * directly by launchd as agents.
 9471  	 */
 9472  	job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
 9473  	if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
 9474  		*managed = true;
 9475  	}
 9476  
 9477  	return BOOTSTRAP_SUCCESS;
 9478  }
 9479  
 9480  kern_return_t
 9481  job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
 9482  {
 9483  	if (!j) {
 9484  		return BOOTSTRAP_NO_MEMORY;
 9485  	}
 9486  
 9487  	struct ldcred *ldc = runtime_get_caller_creds();
 9488  	kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;
 9489  
 9490  #if HAVE_SANDBOX
 9491  	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
 9492  		return BOOTSTRAP_NOT_PRIVILEGED;
 9493  	}
 9494  #endif
 9495  
 9496  	mach_port_t _mp = MACH_PORT_NULL;
 9497  	if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
 9498  		job_t target_j = job_find(NULL, label);
 9499  		if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
 9500  			if (target_j->j_port == MACH_PORT_NULL) {
 9501  				(void)job_assumes(target_j, job_setup_machport(target_j) == true);
 9502  			}
 9503  
 9504  			_mp = target_j->j_port;
 9505  			kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
 9506  		} else {
 9507  			kr = BOOTSTRAP_NO_MEMORY;
 9508  		}
 9509  	}
 9510  
 9511  	*mp = _mp;
 9512  	return kr;
 9513  }
 9514  
 9515  kern_return_t
 9516  job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
 9517  {
 9518  #if TARGET_OS_EMBEDDED
 9519  	return KERN_SUCCESS;
 9520  #endif
 9521  
 9522  	if (!j) {
 9523  		return BOOTSTRAP_NO_MEMORY;
 9524  	}
 9525  
 9526  	uuid_string_t uuid_str;
 9527  	uuid_unparse(uuid, uuid_str);
 9528  	job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);
 9529  
 9530  	job_t ji = NULL, jt = NULL;
 9531  	LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
 9532  		uuid_string_t uuid_str2;
 9533  		uuid_unparse(ji->expected_audit_uuid, uuid_str2);
 9534  
 9535  		if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
 9536  			uuid_clear(ji->expected_audit_uuid);
 9537  			if (asport != MACH_PORT_NULL) {
 9538  				job_log(ji, LOG_DEBUG, "Job should join session with port 0x%x", asport);
 9539  				(void)job_assumes_zero(j, launchd_mport_copy_send(asport));
 9540  			} else {
 9541  				job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
 9542  			}
 9543  
 9544  			ji->asport = asport;
 9545  			LIST_REMOVE(ji, needing_session_sle);
 9546  
 9547  			if (ji->event_monitor) {
 9548  				eventsystem_ping();
 9549  			} else {
 9550  				job_dispatch(ji, false);
 9551  			}
 9552  		}
 9553  	}
 9554  
 9555  	/* Each job that the session port was set for holds a reference. At the end of
 9556  	 * the loop, there will be one extra reference belonging to this MiG protocol.
 9557  	 * We need to release it so that the session goes away when all the jobs
 9558  	 * referencing it are unloaded.
 9559  	 */
 9560  	(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
 9561  
 9562  	return KERN_SUCCESS;
 9563  }
 9564  
 9565  jobmgr_t 
 9566  jobmgr_find_by_name(jobmgr_t jm, const char *where)
 9567  {
 9568  	jobmgr_t jmi, jmi2;
 9569  
 9570  	// NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic.
 9571  	if (where == NULL) {
 9572  		if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
 9573  			where = VPROCMGR_SESSION_LOGINWINDOW;
 9574  		} else {
 9575  			where = VPROCMGR_SESSION_AQUA;
 9576  		}
 9577  	}
 9578  
 9579  	if (strcasecmp(jm->name, where) == 0) {
 9580  		return jm;
 9581  	}
 9582  
 9583  	if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
 9584  		jmi = root_jobmgr;
 9585  		goto jm_found;
 9586  	}
 9587  
 9588  	SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {	
 9589  		if (unlikely(jmi->shutting_down)) {
 9590  			continue;
 9591  		} else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
 9592  			continue;
 9593  		} else if (strcasecmp(jmi->name, where) == 0) {
 9594  			goto jm_found;
 9595  		} else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
 9596  			SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
 9597  				if (strcasecmp(jmi2->name, where) == 0) {
 9598  					jmi = jmi2;
 9599  					goto jm_found;
 9600  				}
 9601  			}
 9602  		}
 9603  	}
 9604  
 9605  jm_found:
 9606  	return jmi;
 9607  }
 9608  
/* MIG server routine: pull an entire sub-bootstrap (ports + service metadata)
 * out of another launchd via _vproc_grab_subset() and graft it onto our tree
 * as a new job manager named `session_type`. Used when a session moves between
 * launchds. On success the target_subset and asport rights are consumed; on
 * failure the half-built manager is shut down.
 */
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	jobmgr_t jmr = NULL;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// If the port resolves to one of our own managers, the caller is trying to move us into ourselves.
	if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	// Ask the other launchd to hand over its request port, receive right, service list and ports.
	kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
	if (job_assumes_zero(j, kr) != 0) {
		goto out;
	}

	// The metadata array and the port array must be parallel; tolerate only the empty case.
	if (launch_data_array_get_count(out_obj_array) != l2l_port_cnt) {
		os_assert_zero(l2l_port_cnt);
	}

	// Build the new manager around the transferred request port and receive right.
	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
		jobmgr_log(jmr, LOG_NOTICE, "Registering new GUI session.");
		kr = vproc_mig_register_gui_session(inherited_bootstrap_port, asport);
		if (kr) {
			jobmgr_log(jmr, LOG_ERR, "Failed to register GUI session with PID 1: 0x%x/0x%x", inherited_bootstrap_port, kr);
		}
	}

	jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;

	/* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
	 * processing an IPC request, we'll do this action before the new job manager can get any IPC
	 * requests. This serialization is guaranteed since we are single-threaded in that respect.
	 */
	if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
		// This is so awful.
		// Remove the job from its current job manager.
		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, pid_hash_sle);

		// Put the job into the target job manager.
		LIST_INSERT_HEAD(&jmr->jobs, j, sle);
		LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

		j->mgr = jmr;
		job_set_global_on_demand(j, true);

		// Keep launchd alive while this migrated job exists.
		if (!j->holds_ref) {
			job_log(j, LOG_PERF, "Job moved subset into: %s", j->mgr->name);
			j->holds_ref = true;
			runtime_add_ref();
		}
	}

	// Re-register each transferred Mach service under the job that owns it in the new manager.
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		(void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (unlikely(!j_for_service)) {
			// The PID probably exited
			(void)job_assumes_zero(j, launchd_mport_deallocate(l2l_ports[l2l_i]));
			continue;
		}

		// machservice_new() takes over the port right on success.
		if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	// On success we consume the caller's rights; on failure, tear down the half-built manager.
	if (kr == 0) {
		if (target_subset) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(target_subset));
		}
		if (asport) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	} else if (jmr) {
		jobmgr_shutdown(jmr);
	}

	return kr;
}
 9735  
/* MIG server routine: initialize the caller's job manager as a named session
 * (Aqua, LoginWindow, ...), attach the audit-session port, and dispatch the
 * session's bootstrapper job. Fails if the manager was already initialized.
 */
kern_return_t
job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_t j2;

	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (j->mgr->session_initialized) {
		job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
		kr = BOOTSTRAP_NOT_PRIVILEGED;
		return kr;
	} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		jobmgr_t jmi;

		/*
		 * 5330262
		 *
		 * We're working around LoginWindow and the WindowServer.
		 *
		 * In practice, there is only one LoginWindow session. Unfortunately, for certain
		 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
		 * spawns a replacement loginwindow session before cleaning up the previous one.
		 *
		 * We're going to use the creation of a new LoginWindow context as a clue that the
		 * previous LoginWindow context is on the way out and therefore we should just
		 * kick-start the shutdown of it.
		 */

		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if (unlikely(jmi->shutting_down)) {
				continue;
			} else if (strcasecmp(jmi->name, session_type) == 0) {
				jobmgr_shutdown(jmi);
				break;
			}
		}
	} else if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
		// Aqua sessions stop listening on their own bootstrap port here.
		(void)job_assumes_zero(j, runtime_remove_mport(j->mgr->jm_port));
 	}

	jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
	/* NOTE(review): unbounded strcpy of a name_t into name_init — presumably
	 * name_init is sized to hold a full name_t; confirm, else prefer strlcpy.
	 */
	strcpy(j->mgr->name_init, session_type);

	// Spin up the session's bootstrapper and hand it the audit-session port.
	if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
		j2->asport = asport;
		(void)job_assumes(j, job_dispatch(j2, true));
		kr = BOOTSTRAP_SUCCESS;
	}

	return kr;
}
 9790  
/* MIG server routine: move an anonymous job from its current session manager
 * into the manager named `session_name`, creating that manager if necessary.
 * Only permitted outside the system (PID 1) bootstrap and only for anonymous
 * jobs; managed jobs must use LimitLoadToSessionType instead. Returns the new
 * manager's bootstrap port via *new_bsport.
 */
kern_return_t
job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);

	if (!job_assumes(j, pid1_magic == false)) {
		job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!j->anonymous) {
		job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
	if (target_jm == j->mgr) {
		// Already there: consume the rights the caller handed us and return our port.
		job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
		(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		(void)job_assumes_zero(j, launchd_mport_deallocate(requestor_port));
		*new_bsport = target_jm->jm_port;
		return BOOTSTRAP_SUCCESS;
	}

	if (!target_jm) {
		// No such session yet: create it as an implicit subset. jobmgr_new() takes the ports.
		target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
		if (target_jm) {
			target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	}

	if (!job_assumes(j, target_jm != NULL)) {
		job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
		return BOOTSTRAP_NO_MEMORY;
	}

	// Remove the job from it's current job manager.
	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, pid_hash_sle);

	// Also detach it from the old manager's global-environment list, if present.
	job_t ji = NULL, jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
		if (ji == j) {
			LIST_REMOVE(ji, global_env_sle);
			break;
		}
	}

	// Put the job into the target job manager.
	LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
	LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

	// ji is non-NULL only if the break above fired, i.e. j was on the global-env list.
	if (ji) {
		LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
	}

	// Move our Mach services over if we're not in a flat namespace.
	if (!launchd_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
		struct machservice *msi = NULL, *msit = NULL;
		SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
			LIST_REMOVE(msi, name_hash_sle);
			LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}

	j->mgr = target_jm;

	if (!j->holds_ref) {
		/* Anonymous jobs which move around are particularly interesting to us, so we want to
		 * stick around while they're still around.
		 * For example, login calls into the PAM launchd module, which moves the process into
		 * the StandardIO session by default. So we'll hold a reference on that job to prevent
		 * ourselves from going away.
		 */
		j->holds_ref = true;
		job_log(j, LOG_PERF, "Job switched into manager: %s", j->mgr->name);
		runtime_add_ref();
	}

	*new_bsport = target_jm->jm_port;

	return KERN_SUCCESS;
}
 9885  
 9886  kern_return_t
 9887  job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
 9888  		vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
 9889  		mach_port_array_t *portsp, unsigned int *ports_cnt)
 9890  {
 9891  	launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
 9892  	mach_port_array_t ports = NULL;
 9893  	unsigned int cnt = 0, cnt2 = 0;
 9894  	size_t packed_size;
 9895  	struct machservice *ms;
 9896  	jobmgr_t jm;
 9897  	job_t ji;
 9898  
 9899  	if (!j) {
 9900  		return BOOTSTRAP_NO_MEMORY;
 9901  	}
 9902  
 9903  	jm = j->mgr;
 9904  
 9905  	if (unlikely(!pid1_magic)) {
 9906  		job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
 9907  		return BOOTSTRAP_NOT_PRIVILEGED;
 9908  	}
 9909  	if (unlikely(jobmgr_parent(jm) == NULL)) {
 9910  		job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
 9911  		return BOOTSTRAP_NOT_PRIVILEGED;
 9912  	}
 9913  	if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
 9914  		job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
 9915  		return BOOTSTRAP_NOT_PRIVILEGED;
 9916  	}
 9917  	if (unlikely(!j->anonymous)) {
 9918  		job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
 9919  		return BOOTSTRAP_NOT_PRIVILEGED;
 9920  	}
 9921  
 9922  	job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
 9923  
 9924  	outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
 9925  	if (!job_assumes(j, outdata_obj_array)) {
 9926  		goto out_bad;
 9927  	}
 9928  
 9929  	*outdataCnt = 20 * 1024 * 1024;
 9930  	mig_allocate(outdata, *outdataCnt);
 9931  	if (!job_assumes(j, *outdata != 0)) {
 9932  		return 1;
 9933  	}
 9934  
 9935  	LIST_FOREACH(ji, &j->mgr->jobs, sle) {
 9936  		if (!ji->anonymous) {
 9937  			continue;
 9938  		}
 9939  		SLIST_FOREACH(ms, &ji->machservices, sle) {
 9940  			cnt++;
 9941  		}
 9942  	}
 9943  
 9944  	mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
 9945  	if (!job_assumes(j, ports != NULL)) {
 9946  		goto out_bad;
 9947  	}
 9948  
 9949  	LIST_FOREACH(ji, &j->mgr->jobs, sle) {
 9950  		if (!ji->anonymous) {
 9951  			continue;
 9952  		}
 9953  
 9954  		SLIST_FOREACH(ms, &ji->machservices, sle) {
 9955  			if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
 9956  				(void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
 9957  			} else {
 9958  				goto out_bad;
 9959  			}
 9960  
 9961  			if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
 9962  				(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
 9963  			} else {
 9964  				goto out_bad;
 9965  			}
 9966  
 9967  			if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
 9968  				(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
 9969  			} else {
 9970  				goto out_bad;
 9971  			}
 9972  
 9973  			if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
 9974  				(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
 9975  			} else {
 9976  				goto out_bad;
 9977  			}
 9978  
 9979  			ports[cnt2] = machservice_port(ms);
 9980  
 9981  			// Increment the send right by one so we can shutdown the jobmgr cleanly
 9982  			(void)jobmgr_assumes_zero(jm, launchd_mport_copy_send(ports[cnt2]));
 9983  			cnt2++;
 9984  		}
 9985  	}
 9986  
 9987  	(void)job_assumes(j, cnt == cnt2);
 9988  
 9989  	runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
 9990  	packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
 9991  	if (!job_assumes(j, packed_size != 0)) {
 9992  		goto out_bad;
 9993  	}
 9994  
 9995  	launch_data_free(outdata_obj_array);
 9996  
 9997  	*portsp = ports;
 9998  	*ports_cnt = cnt;
 9999  
10000  	*reqport = jm->req_port;
10001  	*rcvright = jm->jm_port;
10002  
10003  	jm->req_port = 0;
10004  	jm->jm_port = 0;
10005  
10006  	workaround_5477111 = j;
10007  
10008  	jobmgr_shutdown(jm);
10009  
10010  	return BOOTSTRAP_SUCCESS;
10011  
10012  out_bad:
10013  	if (outdata_obj_array) {
10014  		launch_data_free(outdata_obj_array);
10015  	}
10016  	if (*outdata) {
10017  		mig_deallocate(*outdata, *outdataCnt);
10018  	}
10019  	if (ports) {
10020  		mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
10021  	}
10022  
10023  	return BOOTSTRAP_NO_MEMORY;
10024  }
10025  
10026  kern_return_t
10027  job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
10028  {
10029  	int bsdepth = 0;
10030  	jobmgr_t jmr;
10031  
10032  	if (!j) {
10033  		return BOOTSTRAP_NO_MEMORY;
10034  	}
10035  	if (j->mgr->shutting_down) {
10036  		return BOOTSTRAP_UNKNOWN_SERVICE;
10037  	}
10038  
10039  	jmr = j->mgr;
10040  
10041  	while ((jmr = jobmgr_parent(jmr)) != NULL) {
10042  		bsdepth++;
10043  	}
10044  
10045  	// Since we use recursion, we need an artificial depth for subsets
10046  	if (unlikely(bsdepth > 100)) {
10047  		job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
10048  		return BOOTSTRAP_NO_MEMORY;
10049  	}
10050  
10051  	char name[NAME_MAX];
10052  	snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));
10053  
10054  	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
10055  		if (unlikely(requestorport == MACH_PORT_NULL)) {
10056  			return BOOTSTRAP_NOT_PRIVILEGED;
10057  		}
10058  		return BOOTSTRAP_NO_MEMORY;
10059  	}
10060  
10061  	*subsetportp = jmr->jm_port;
10062  	jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;
10063  
10064  	/* A job could create multiple subsets, so only add a reference the first time
10065  	 * it does so we don't have to keep a count.
10066  	 */
10067  	if (j->anonymous && !j->holds_ref) {
10068  		job_log(j, LOG_PERF, "Job created subset: %s", jmr->name);
10069  		j->holds_ref = true;
10070  		runtime_add_ref();
10071  	}
10072  
10073  	job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
10074  	return BOOTSTRAP_SUCCESS;
10075  }
10076  
/* Import one XPC service payload (a job dictionary) into the appropriate
 * domain. If the payload names a destination XPC domain (system, per-user,
 * per-session), the job is imported into that singleton domain and aliased
 * back into the local manager to reserve the label; otherwise it is imported
 * directly into `jm`. Returns the imported (or aliased) job, or NULL with
 * errno set on failure.
 */
job_t
_xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
{
	jobmgr_t where2put = NULL;

	if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
		errno = EINVAL;
		return NULL;
	}

	// A label is mandatory; it identifies the service everywhere below.
	launch_data_t ldlabel = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL);
	if (!ldlabel || launch_data_get_type(ldlabel) != LAUNCH_DATA_STRING) {
		errno = EINVAL;
		return NULL;
	}

	const char *label = launch_data_get_string(ldlabel);
	jobmgr_log(jm, LOG_DEBUG, "Importing service: %s", label);

	// The payload may redirect the import into a singleton XPC domain.
	launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
	if (destname) {
		bool supported_domain = false;

		if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
			const char *str = launch_data_get_string(destname);
			if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
				where2put = _s_xpc_system_domain;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
				where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
				supported_domain = true;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
				where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
			} else {
				jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
				errno = EINVAL;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
			errno = EINVAL;
		}

		// Multiple-instance services are only allowed in domains flagged as supporting them.
		if (where2put && !supported_domain) {
			launch_data_t mi = NULL;
			if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
				if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
					jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
					where2put = NULL;
					errno = EINVAL;
				}
			}
		}
	} else {
		where2put = jm;
	}

	job_t j = NULL;
	if (where2put) {
		/* Gross. If the service already exists in a singleton domain, then
		 * jobmgr_import2() will return the existing job. But if we fail to alias
		 * this job, we will normally want to remove it. But if we did not create
		 * it in the first place, then we need to avoid removing it. So check
		 * errno against EEXIST in the success case and if it's EEXIST, then do
		 * not remove the original job in the event of a failed alias.
		 *
		 * This really needs to be re-thought, but I think it'll require a larger
		 * evaluation of launchd's data structures. Right now, once a job is
		 * imported into a singleton domain, it won't be removed until the system
		 * shuts down, but that may not always be true. If it ever changes, we'll
		 * have a problem because we'll have to account for all existing aliases
		 * and clean them up somehow. Or just start ref-counting. I knew this
		 * aliasing stuff would be trouble...
		 *
		 * <rdar://problem/10646503>
		 */
		jobmgr_log(where2put, LOG_DEBUG, "Importing service...");

		// errno doubles as the created-vs-existing signal; clear it before the import.
		errno = 0;
		if ((j = jobmgr_import2(where2put, pload))) {
			bool created = (errno != EEXIST);
			j->xpc_service = true;

			if (where2put->xpc_singleton) {
				/* If the service was destined for one of the global domains,
				 * then we have to alias it into our local domain to reserve the
				 * name.
				 */
				job_t ja = NULL;
				if (!(ja = job_new_alias(jm, j))) {
					/* If we failed to alias the job because of a conflict over
					 * the label, then we remove it from the global domain. We
					 * don't want to risk having imported a malicious job into
					 * one of the global domains.
					 */
					if (errno != EEXIST) {
						job_log(j, LOG_ERR, "Failed to alias job into: %s: %d: %s", where2put->name, errno, strerror(errno));
					} else {
						errno = 0;
					}

					if (created) {
						jobmgr_log(jm, LOG_WARNING, "Singleton service already existed in job-local namespace. Removing: %s", j->label);
						job_remove(j);
					}

					j = NULL;
				} else {
					jobmgr_log(jm, LOG_DEBUG, "Aliased service into local domain: %s", j->label);
					(void)job_dispatch(j, false);
					ja->xpc_service = true;
					j = ja;
				}
			} else {
				(void)job_dispatch(j, false);
			}
		}
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Could not find destination for service: %s", label);
	}

	return j;
}
10198  
10199  int
10200  _xpc_domain_import_services(job_t j, launch_data_t services)
10201  {
10202  	int error = EINVAL;
10203  	if (launch_data_get_type(services) != LAUNCH_DATA_ARRAY) {
10204  		return error;
10205  	}
10206  
10207  	size_t i = 0;
10208  	size_t c = launch_data_array_get_count(services);
10209  	jobmgr_log(j->mgr, LOG_DEBUG, "Importing new services: %lu", c);
10210  
10211  	for (i = 0; i < c; i++) {
10212  		jobmgr_log(j->mgr, LOG_DEBUG, "Importing service at index: %lu", i);
10213  
10214  		job_t nj = NULL;
10215  		launch_data_t ploadi = launch_data_array_get_index(services, i);
10216  		if (!(nj = _xpc_domain_import_service(j->mgr, ploadi))) {
10217  			if (!j->mgr->session_initialized && errno) {
10218  				/* Service import failures are only fatal if the domain is being
10219  				 * initialized. If we're extending the domain, we can run into
10220  				 * errors with services already existing, so we just ignore them.
10221  				 * In the case of a domain extension, we don't want to halt the
10222  				 * operation if we run into an error with one service.
10223  				 *
10224  				 * <rdar://problem/10842779>
10225  				 */
10226  				jobmgr_log(j->mgr, LOG_ERR, "Failed to import service at index: %lu: %d: %s", i, errno, strerror(errno));
10227  				error = errno;
10228  				break;
10229  			}
10230  		} else {
10231  			jobmgr_log(j->mgr, LOG_DEBUG, "Imported service: %s", nj->label);
10232  		}
10233  	}
10234  
10235  	if (i == c) {
10236  		error = 0;
10237  	}
10238  
10239  	return error;
10240  }
10241  
10242  kern_return_t
10243  xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
10244  {
10245  	if (unlikely(!pid1_magic)) {
10246  		job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
10247  		return BOOTSTRAP_NOT_PRIVILEGED;
10248  	}
10249  	if (!j || !MACH_PORT_VALID(reqport)) {
10250  		return BOOTSTRAP_UNKNOWN_SERVICE;
10251  	}
10252  	if (root_jobmgr->shutting_down) {
10253  		jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
10254  		return BOOTSTRAP_NOT_PRIVILEGED;
10255  	}
10256  	if (!j->xpc_bootstrapper) {
10257  		job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
10258  		return BOOTSTRAP_NOT_PRIVILEGED;
10259  	}
10260  
10261  	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
10262  	/* All XPC domains are children of the root job manager. What we're creating
10263  	 * here is really just a skeleton. By creating it, we're adding reqp to our
10264  	 * port set. It will have two messages on it. The first specifies the
10265  	 * environment of the originator. This is so we can cache it and hand it to
10266  	 * xpcproxy to bootstrap our services. The second is the set of jobs that is
10267  	 * to be bootstrapped in.
10268  	 */
10269  	jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
10270  	if (job_assumes(j, jm != NULL)) {
10271  		jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
10272  		jm->shortdesc = "private";
10273  		kr = BOOTSTRAP_SUCCESS;
10274  	}
10275  
10276  	return kr;
10277  }
10278  
kern_return_t
xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
{
	/* First of the two bootstrap messages for a freshly-created XPC domain:
	 * record the originating process' identity, ports, and context so they
	 * can later be handed out at check-in time.
	 */
	if (!j) {
		/* Due to the whacky nature of XPC service bootstrapping, we can end up
		 * getting this message long after the requesting process has gone away.
		 * See <rdar://problem/8593143>.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jobmgr_t jm = j->mgr;
	// Only XPC domains may have their environment set.
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// A non-NULL req_asport means the environment was already set once.
	if (jm->req_asport != MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	struct proc_bsdinfowithuniqid proc;
	if (proc_pidinfo(ldc->pid, PROC_PIDT_BSDINFOWITHUNIQID, 1, &proc, PROC_PIDT_BSDINFOWITHUNIQID_SIZE) == 0) {
		// ESRCH just means the requestor already died; anything else is
		// unexpected and worth recording.
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}

		// The domain is useless without its originator; tear it down.
		jm->error = errno;
		jobmgr_remove(jm);
		return BOOTSTRAP_NO_MEMORY;
	}

#if !TARGET_OS_EMBEDDED
	if (jobmgr_assumes_zero(jm, audit_session_port(ldc->asid, &jm->req_asport)) != 0) {
		jm->error = EPERM;
		jobmgr_remove(jm);
		job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#else
	jm->req_asport = MACH_PORT_DEAD;
#endif

	/* Move any attach-waiters that were parked on the requestor's PID into
	 * this domain's own attach list now that the domain exists.
	 */
	struct waiting4attach *w4ai = NULL;
	struct waiting4attach *w4ait = NULL;
	LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
		if (w4ai->dest == ldc->pid) {
			jobmgr_log(jm, LOG_DEBUG, "Migrating attach for: %s", w4ai->name);
			LIST_REMOVE(w4ai, le);
			LIST_INSERT_HEAD(&jm->attaches, w4ai, le);
			w4ai->dest = 0;
		}
	}

	// Name the domain after the originator's command name and PID.
	(void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s.%d", proc.pbsd.pbi_comm, ldc->pid);
	strlcpy(jm->owner, proc.pbsd.pbi_comm, sizeof(jm->owner));
	// Cache the caller-supplied ports/context and credentials for
	// xpc_domain_check_in().
	jm->req_bsport = bsport;
	jm->req_excport = excport;
	jm->req_rport = rp;
	jm->req_ctx = ctx;
	jm->req_ctx_sz = ctx_sz;
	jm->req_pid = ldc->pid;
	jm->req_euid = ldc->euid;
	jm->req_egid = ldc->egid;
	jm->req_asid = ldc->asid;
	jm->req_uniqueid = proc.p_uniqidentifier.p_uniqueid;

	return KERN_SUCCESS;
}
10348  
kern_return_t
xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	/* Second bootstrap message for an XPC domain: unpack the serialized
	 * service definitions, import them, and mark the session initialized.
	 * A failed import destroys the entire domain.
	 */
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	// Only the registered XPC bootstrapper (looked up by the caller's PID
	// in the root job manager) may load services into a domain.
 	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
 	if (!(rootj && rootj->xpc_bootstrapper)) {
		job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// This is just for XPC domains (for now).
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (j->mgr->session_initialized) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!services) {
		return BOOTSTRAP_NO_MEMORY;
	}

	int error = _xpc_domain_import_services(j, services);
	if (error) {
		// A failed initialization obliterates the whole domain.
		j->mgr->error = error;
		jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
		jobmgr_remove(j->mgr);
	} else {
		j->mgr->session_initialized = true;
		// Wake the originator waiting on the cached reply port, then drop
		// our reference to that port.
		(void)jobmgr_assumes_zero(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS));
		j->mgr->req_rport = MACH_PORT_NULL;

		/* Returning a failure code will destroy the message, whereas returning
		 * success will not, so we need to clean up here.
		 */
		mig_deallocate(services_buff, services_sz);
		error = BOOTSTRAP_SUCCESS;
	}

	return error;
}
10396  
10397  kern_return_t
10398  xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport, 
10399  	mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid,
10400  	int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
10401  {
10402  	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
10403  		return BOOTSTRAP_UNKNOWN_SERVICE;
10404  	}
10405  	jobmgr_t jm = j->mgr;
10406  	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
10407  		return BOOTSTRAP_NOT_PRIVILEGED;
10408  	}
10409  
10410  	if (jm->req_asport == MACH_PORT_NULL) {
10411  		return BOOTSTRAP_NOT_PRIVILEGED;
10412  	}
10413  
10414  	*bsport = jm->req_bsport;
10415  	*sbsport = root_jobmgr->jm_port;
10416  	*excport = jm->req_excport;
10417  	if (j->joins_gui_session) {
10418  		if (jm->req_gui_asport) {
10419  			*asport = jm->req_gui_asport;
10420  		} else {
10421  			job_log(j, LOG_NOTICE, "No GUI session set for UID of user service. This service may not act properly.");
10422  			*asport = jm->req_asport;
10423  		}
10424  	} else {
10425  		*asport = jm->req_asport;
10426  	}
10427  
10428  	*uid = jm->req_euid;
10429  	*gid = jm->req_egid;
10430  	*asid = jm->req_asid;
10431  
10432  	*ctx = jm->req_ctx;
10433  	*ctx_sz = jm->req_ctx_sz;
10434  
10435  	return KERN_SUCCESS;
10436  }
10437  
10438  kern_return_t
10439  xpc_domain_get_service_name(job_t j, event_name_t name)
10440  {
10441  	if (!j) {
10442  		return BOOTSTRAP_NO_MEMORY;
10443  	}
10444  
10445  	if (!j->xpc_service) {
10446  		jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
10447  		return BOOTSTRAP_NOT_PRIVILEGED;
10448  	}
10449  
10450  	const char *what2find = j->label;
10451  	if (j->dedicated_instance) {
10452  		what2find = j->original->label;
10453  	}
10454  
10455  	struct machservice *msi = NULL;
10456  	SLIST_FOREACH(msi, &j->machservices, sle) {
10457  		if (strcmp(msi->name, what2find) == 0) {
10458  			break;
10459  		}
10460  	}
10461  
10462  	if (!msi) {
10463  		jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name that does not exist: %s", j->label);
10464  		return BOOTSTRAP_UNKNOWN_SERVICE;
10465  	}
10466  
10467  	(void)strlcpy(name, msi->name, sizeof(event_name_t));
10468  	return BOOTSTRAP_SUCCESS;
10469  }
10470  
10471  #if XPC_LPI_VERSION >= 20111216
10472  kern_return_t
10473  xpc_domain_add_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
10474  {
10475  	if (!j) {
10476  		return BOOTSTRAP_UNKNOWN_SERVICE;
10477  	}
10478  
10479   	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
10480   	if (!(rootj && rootj->xpc_bootstrapper)) {
10481  		job_log(j, LOG_ERR, "Attempt to add service to XPC domain by unprivileged job.");
10482  		return BOOTSTRAP_NOT_PRIVILEGED;
10483  	}
10484  
10485  	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
10486  		return BOOTSTRAP_NOT_PRIVILEGED;
10487  	}
10488  
10489  	size_t offset = 0;
10490  	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
10491  	if (!services) {
10492  		return BOOTSTRAP_NO_MEMORY;
10493  	}
10494  
10495  	int error = _xpc_domain_import_services(j, services);
10496  	if (!error) {
10497  		mig_deallocate(services_buff, services_sz);
10498  	}
10499  
10500  	return error;
10501  }
10502  #endif
10503  
10504  #pragma mark XPC Events
int
xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms)
{
	/* Find (or lazily create) the MachService used as the event channel
	 * for `stream` on job `j`. On success, *ms is set and 0 is returned.
	 * Returns EXNOMEM if a new channel could not be allocated, or EEXIST
	 * if the job already registered a non-channel MachService by that name.
	 */
	int error = EXNOMEM;
	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (strcmp(stream, msi->name) == 0) {
			break;
		}
	}

	if (!msi) {
		mach_port_t sp = MACH_PORT_NULL;
		msi = machservice_new(j, stream, &sp, false);
		if (!msi) {
			return EXNOMEM;
		}

		job_log(j, LOG_DEBUG, "Creating new MachService for stream: %s", stream);
		/* Hack to keep this from being publicly accessible through
		 * bootstrap_look_up().
		 */
		if (!j->dedicated_instance) {
			LIST_REMOVE(msi, name_hash_sle);
		}
		msi->event_channel = true;

		/* If we call job_dispatch() here before the audit session for the job
		 * has been set, we'll end up not watching this service. But we also have
		 * to take care not to watch the port if the job is active.
		 *
		 * See <rdar://problem/10357855>.
		 */
		if (!j->currently_ignored) {
			machservice_watch(j, msi);
		}

		error = 0;
		*ms = msi;
	} else if (!msi->event_channel) {
		job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
		error = EEXIST;
	} else {
		// Existing event channel; reuse it.
		error = 0;
		*ms = msi;
	}

	return error;
}
10554  
10555  int
10556  xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply)
10557  {
10558  	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10559  	if (!stream) {
10560  		return EXINVAL;
10561  	}
10562  
10563  	uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10564  	if (!token) {
10565  		return EXINVAL;
10566  	}
10567  
10568  	job_log(j, LOG_DEBUG, "Getting event name for stream/token: %s/0x%llu", stream, token);
10569  
10570  	int result = ESRCH;
10571  	struct externalevent *event = externalevent_find(stream, token);
10572  	if (event && j->event_monitor) {
10573  		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10574  		xpc_dictionary_set_string(reply2, XPC_EVENT_ROUTINE_KEY_NAME, event->name);
10575  		*reply = reply2;
10576  
10577  		job_log(j, LOG_DEBUG, "Found: %s", event->name);
10578  		result = 0;
10579  	}
10580  
10581  	return result;
10582  }
10583  
10584  int
10585  xpc_event_copy_entitlements(job_t j, xpc_object_t request, xpc_object_t *reply)
10586  {
10587  	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10588  	if (!stream) {
10589  		return EXINVAL;
10590  	}
10591  
10592  	uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10593  	if (!token) {
10594  		return EXINVAL;
10595  	}
10596  
10597  	job_log(j, LOG_DEBUG, "Getting entitlements for stream/token: %s/0x%llu", stream, token);
10598  
10599  	int result = ESRCH;
10600  	struct externalevent *event = externalevent_find(stream, token);
10601  	if (event && j->event_monitor) {
10602  		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10603  		xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_ENTITLEMENTS, event->entitlements);
10604  		*reply = reply2;
10605  
10606  		job_log(j, LOG_DEBUG, "Found: %s", event->name);
10607  		result = 0;
10608  	}
10609  
10610  	return result;
10611  }
10612  
10613  // TODO - can be removed with rdar://problem/12666150
10614  #ifndef XPC_EVENT_FLAG_ALLOW_UNMANAGED
10615  #define XPC_EVENT_FLAG_ALLOW_UNMANAGED (1 << 1)
10616  #endif
10617  	
int
xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Set or remove the external event `key` on `stream` for job `j`. An
	 * absent event payload means "remove"; otherwise any existing event
	 * with the same stream/key is replaced, and the stream is created on
	 * first use. On success an empty reply dictionary is returned via
	 * *reply.
	 */
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);
	if (!key) {
		return EXINVAL;
	}

	// The payload is optional (absent = removal) but must be a dictionary
	// when present.
	xpc_object_t event = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_EVENT);
	if (event && xpc_get_type(event) != XPC_TYPE_DICTIONARY) {
		return EXINVAL;
	}

	uint64_t flags = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_FLAGS);

	/* Don't allow events to be set for anonymous jobs unless specifically
	 * requested in the flags. Only permit this for internal development.
	 */
	if (j->anonymous && ((flags & XPC_EVENT_FLAG_ALLOW_UNMANAGED) == 0 || !launchd_apple_internal)) {
		job_log(j, LOG_ERR, "Unmanaged jobs may not make XPC Events requests.");
		return EPERM;
	}

	job_log(j, LOG_DEBUG, "%s event for stream/key: %s/%s", event ? "Setting" : "Removing", stream, key);

	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		/* If the event for the given key already exists for the job, we need to
		 * remove the old one first.
		 */
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			job_log(j, LOG_DEBUG, "Event exists. Removing.");
			externalevent_delete(eei);
			break;
		}
	}

	int result = EXNOMEM;
	if (event) {
		// Find the stream, creating it on first use.
		struct eventsystem *es = eventsystem_find(stream);
		if (!es) {
			job_log(j, LOG_DEBUG, "Creating stream.");
			es = eventsystem_new(stream);
		}

		if (es) {
			job_log(j, LOG_DEBUG, "Adding event.");
			if (externalevent_new(j, es, key, event, flags)) {
				job_log(j, LOG_DEBUG, "Added new event for key: %s", key);
				result = 0;
			} else {
				job_log(j, LOG_ERR, "Could not create event for key: %s", key);
			}
		} else {
			job_log(j, LOG_ERR, "Event stream could not be created: %s", stream);
		}
	} else {
		/* If the event was NULL, then we just remove it and return. */
		result = 0;
	}

	if (result == 0) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		*reply = reply2;
	}

	return result;
}
10691  
int
xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Copy events registered by job `j`. Three modes, selected by which
	 * request keys are present:
	 *   - no stream:          all events for all streams, keyed by stream;
	 *   - stream but no key:  all events for that stream;
	 *   - stream and key:     the single matching event.
	 * Requesting a specific key without a stream is invalid. Returns ESRCH
	 * when nothing matched (no reply is produced in that case).
	 */
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);

	bool all_streams = (stream == NULL);
	bool all_events = (key == NULL || strcmp(key, "") == 0); // strcmp for libxpc compatibility
	xpc_object_t events = NULL;

	if (all_streams && !all_events) {
		return EXINVAL;
	}

	if (all_streams || all_events) {
		job_log(j, LOG_DEBUG, "Fetching all events%s%s", stream ? " for stream: " : "", stream ? stream : "");
		events = xpc_dictionary_create(NULL, NULL, 0);
	} else {
		job_log(j, LOG_DEBUG, "Fetching stream/key: %s/%s", stream, key);
	}

	int result = ESRCH;
	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		if (all_streams) {
			// Group events under a per-stream sub-dictionary, created on
			// first use; the outer dictionary holds the reference, so we
			// release our own right after inserting.
			xpc_object_t sub = xpc_dictionary_get_value(events, eei->sys->name);
			if (sub == NULL) {
				sub = xpc_dictionary_create(NULL, NULL, 0);
				xpc_dictionary_set_value(events, eei->sys->name, sub);
				xpc_release(sub);
			}
			xpc_dictionary_set_value(sub, eei->name, eei->event);
		} else if (strcmp(eei->sys->name, stream) == 0) {
			if (all_events) {
				xpc_dictionary_set_value(events, eei->name, eei->event);
			} else if (strcmp(eei->name, key) == 0) {
				job_log(j, LOG_DEBUG, "Found event.");
				// Single-event mode: hand back a retained reference to the
				// event itself instead of a wrapper dictionary.
				events = xpc_retain(eei->event);
				break;
			}
		}
	}

	if (events) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENT, events);
		xpc_release(events);

		*reply = reply2;
		result = 0;
	}

	return result;
}
10746  
10747  int
10748  xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
10749  {
10750  	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10751  	if (!stream) {
10752  		return EXINVAL;
10753  	}
10754  
10755  	job_log(j, LOG_DEBUG, "Checking in stream: %s", stream);
10756  
10757  	struct machservice *ms = NULL;
10758  	int error = xpc_event_find_channel(j, stream, &ms);
10759  	if (error) {
10760  		job_log(j, LOG_ERR, "Failed to check in: 0x%x: %s", error, xpc_strerror(error));
10761  	} else if (ms->isActive) {
10762  		job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
10763  		error = EBUSY;
10764  	} else {
10765  		machservice_request_notifications(ms);
10766  
10767  		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10768  		xpc_dictionary_set_mach_recv(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10769  		*reply = reply2;
10770  		error = 0;
10771  	}
10772  
10773  	return error;
10774  }
10775  
10776  int
10777  xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply)
10778  {
10779  	if (!j->event_monitor) {
10780  		return EPERM;
10781  	}
10782  
10783  	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10784  	if (!stream) {
10785  		return EXINVAL;
10786  	}
10787  
10788  	uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10789  	if (!token) {
10790  		return EXINVAL;
10791  	}
10792  
10793  	job_log(j, LOG_DEBUG, "Looking up channel for stream/token: %s/%llu", stream, token);
10794  
10795  	struct externalevent *ee = externalevent_find(stream, token);
10796  	if (!ee) {
10797  		return ESRCH;
10798  	}
10799  
10800  	struct machservice *ms = NULL;
10801  	int error = xpc_event_find_channel(ee->job, stream, &ms);
10802  	if (!error) {
10803  		job_log(j, LOG_DEBUG, "Found event channel port: 0x%x", ms->port);
10804  		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10805  		xpc_dictionary_set_mach_send(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10806  		*reply = reply2;
10807  		error = 0;
10808  	} else {
10809  		job_log(j, LOG_ERR, "Could not find event channel for stream/token: %s/%llu: 0x%x: %s", stream, token, error, xpc_strerror(error));
10810  	}
10811  
10812  	return error;
10813  }
10814  
int
xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Event-monitor-only: check a provider in for `stream` and return the
	 * stream's current events as an array of alternating token/payload
	 * entries. The stream is created (empty) on first check-in.
	 */
	if (!j->event_monitor) {
		return EPERM;
	}

	/* This indicates that the event monitor is now safe to signal. This state
	 * is independent of whether this operation actually succeeds; we just need
	 * it to ignore SIGUSR1.
	 */
	j->event_monitor_ready2signal = true;

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	job_log(j, LOG_DEBUG, "Provider checking in for stream: %s", stream);

	xpc_object_t events = xpc_array_create(NULL, 0);
	struct eventsystem *es = eventsystem_find(stream);
	if (!es) {
		/* If we had to create the event stream, there were no events, so just
		 * give back the empty array.
		 */
		job_log(j, LOG_DEBUG, "Creating event stream.");
		es = eventsystem_new(stream);
		if (!job_assumes(j, es)) {
			// Drop the array we created before bailing out.
			xpc_release(events);
			return EXNOMEM;
		}

		// Remember the launchd helper stream in a global for later use.
		if (strcmp(stream, "com.apple.launchd.helper") == 0) {
			_launchd_support_system = es;
		}
	} else {
		job_log(j, LOG_DEBUG, "Filling event array.");

		// Each event contributes two consecutive array entries: its numeric
		// id (token) followed by the event payload.
		struct externalevent *ei = NULL;
		LIST_FOREACH(ei, &es->events, sys_le) {
			xpc_array_set_uint64(events, XPC_ARRAY_APPEND, ei->id);
			xpc_array_append_value(events, ei->event);
		}
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENTS, events);
	xpc_release(events);
	*reply = reply2;

	return 0;
}
10868  
int
xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Event-monitor-only: record the current true/false state of the event
	 * identified by stream/token, then re-dispatch the owning job so it
	 * can react to the new state.
	 */
	job_t other_j = NULL;

	if (!j->event_monitor) {
		return EPERM;
	}

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
	if (!token) {
		return EXINVAL;
	}

	// The state must be present and must be a boolean.
	bool state = false;
	xpc_object_t xstate = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_STATE);
	if (!xstate || xpc_get_type(xstate) != XPC_TYPE_BOOL) {
		return EXINVAL;
	} else {
		state = xpc_bool_get_value(xstate);
	}

	job_log(j, LOG_DEBUG, "Setting event state to %s for stream/token: %s/%llu", state ? "true" : "false", stream, token);

	struct externalevent *ei = externalevent_find(stream, token);
	if (!ei) {
		job_log(j, LOG_ERR, "Could not find stream/token: %s/%llu", stream, token);
		return ESRCH;
	}

	// Capture the owning job before the event can be deleted below.
	other_j = ei->job;
	ei->state = state;

	if (ei->internal) {
		// An internal event fires once: clearing waiting4ok unblocks the
		// job's exec, and the event itself is consumed.
		job_log(ei->job, LOG_NOTICE, "Job should be able to exec(3) now.");
		ei->job->waiting4ok = false;
		externalevent_delete(ei);
	}

	(void)job_dispatch(other_j, false);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
10920  
bool
xpc_event_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	/* Route an incoming XPC Events request to its handler based on the
	 * opcode in the request. Returns false only when the request carries
	 * no opcode (i.e. it is not an events request); otherwise a reply is
	 * produced (an error dictionary on failure) and true is returned.
	 */
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	// Record the caller's credentials so the handlers can consult them.
	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	// Prefer the managed job for the caller's PID; fall back to the job
	// associated with the port. With no job at all, force the EINVAL case.
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t j = managed_job(ldc->pid);
	if (!j) {
		j = job_mig_intran(p);
		if (!j) {
			// NOTE(review): j stays NULL here; job_log() and the case -1
			// path below are presumably NULL-tolerant — confirm.
			op = -1;
		}
	}

	job_log(j, LOG_DEBUG, "Incoming XPC event request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_EVENT_GET_NAME:
		error = xpc_event_get_event_name(j, request, reply);
		break;
	case XPC_EVENT_SET:
		error = xpc_event_set_event(j, request, reply);
		break;
	case XPC_EVENT_COPY:
		error = xpc_event_copy_event(j, request, reply);
		break;
	case XPC_EVENT_CHECK_IN:
		error = xpc_event_channel_check_in(j, request, reply);
		break;
	case XPC_EVENT_LOOK_UP:
		error = xpc_event_channel_look_up(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_CHECK_IN:
		error = xpc_event_provider_check_in(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_SET_STATE:
		error = xpc_event_provider_set_state(j, request, reply);
		break;
	case XPC_EVENT_COPY_ENTITLEMENTS:
		error = xpc_event_copy_entitlements(j, request, reply);
		break;
	case -1:
		// Unresolvable caller: op was forced to -1 above.
		error = EINVAL;
		break;
	default:
		job_log(j, LOG_ERR, "Bogus opcode.");
		error = EDOM;
	}

	// Any non-zero error is reported back in a reply dictionary so the
	// request is always answered once we claim it.
	if (error) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_uint64(reply2, XPC_EVENT_ROUTINE_KEY_ERROR, error);
		*reply = reply2;
	}

	return true;
}
10986  
10987  uint64_t
10988  xpc_get_jetsam_entitlement(const char *key)
10989  {
10990  	uint64_t entitlement = 0;
10991  
10992  	audit_token_t *token = runtime_get_caller_token();
10993  	xpc_object_t value = xpc_copy_entitlement_for_token(key, token);
10994  	if (value) {
10995  		if (xpc_get_type(value) == XPC_TYPE_UINT64) {
10996  			entitlement = xpc_uint64_get_value(value);
10997  		}
10998  
10999  		xpc_release(value);
11000  	}
11001  	
11002  	return entitlement;	
11003  }
11004  
int
xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Set the Jetsam priority band of the job named in the request. The
	 * caller must be the embedded "god" job or hold a modify-priority
	 * entitlement covering the requested band, unless the global Jetsam
	 * permission check is disabled.
	 */
	if (!j) {
		return EINVAL;
	}

	const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
	if (!label) {
		return EXINVAL;
	}

	// The requested band must be present and within the valid range.
	xpc_jetsam_band_t entitled_band = -1;
	xpc_jetsam_band_t requested_band = (xpc_jetsam_band_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_PRIORITY_BAND);
	if (!requested_band) {
		return EXINVAL;
	}

	if (!(requested_band >= XPC_JETSAM_BAND_SUSPENDED && requested_band < XPC_JETSAM_BAND_LAST)) {
		return EXINVAL;
	}

	uint64_t rcdata = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_RCDATA);

	job_t tj = job_find(root_jobmgr, label);
	if (!tj) {
		return EXSRCH;
	}

	// Authorization: embedded god job, or entitlement at least as high as
	// the requested band.
	boolean_t allow = false;
	if (j->embedded_god) {
		allow = true;
	} else {
		entitled_band = xpc_get_jetsam_entitlement("com.apple.private.jetsam.modify-priority");
		if (entitled_band >= requested_band) {
			allow = true;
		}
	}

	if (!allow) {
		if (launchd_no_jetsam_perm_check) {
			job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set priority: %d", requested_band);
		} else {		
			job_log(j, LOG_ERR, "Job cannot decrease Jetsam priority band (requested/maximum): %d/%d", requested_band, entitled_band);
			return EPERM;
		}
	}

	job_log(j, LOG_INFO, "Setting Jetsam band: %d.", requested_band);
	job_update_jetsam_properties(tj, requested_band, rcdata);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
11061  
int
xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	/* Set the Jetsam memory limit of the job named in the request. Same
	 * authorization model as xpc_process_set_jetsam_band(): embedded god
	 * job, a sufficient memory_limit entitlement, or the global Jetsam
	 * permission check being disabled.
	 */
	if (!j) {
		return EINVAL;
	}

	const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
	if (!label) {
		return EXINVAL;
	}

	int32_t entitlement_limit = 0;
	int32_t requested_limit = (int32_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_MEMORY_LIMIT);

	job_t tj = job_find(root_jobmgr, label);
	if (!tj) {
		return EXSRCH;
	}

	// Authorization: embedded god job, or entitlement at least as high as
	// the requested limit.
	boolean_t allow = false;
	if (j->embedded_god) {
		allow = true;
	} else {
		entitlement_limit = (int32_t)xpc_get_jetsam_entitlement("com.apple.private.jetsam.memory_limit");		
		if (entitlement_limit >= requested_limit) {
			allow = true;
		}
	}

	if (!allow) {
		if (launchd_no_jetsam_perm_check) {
			job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set memory limit: %d", requested_limit);
		} else {		
			job_log(j, LOG_ERR, "Job cannot set Jetsam memory limit (requested/maximum): %d/%d", requested_limit, entitlement_limit);
			return EPERM;
		}
	}

	job_log(j, LOG_INFO, "Setting Jetsam memory limit: %d.", requested_limit);
	job_update_jetsam_memory_limit(tj, requested_limit);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
11109  
11110  static jobmgr_t
11111  _xpc_process_find_target_manager(job_t j, xpc_service_type_t type, pid_t pid)
11112  {
11113  	jobmgr_t target = NULL;
11114  	if (type == XPC_SERVICE_TYPE_BUNDLED) {
11115  		job_log(j, LOG_DEBUG, "Bundled service. Searching for XPC domains for PID: %d", pid);
11116  
11117  		jobmgr_t jmi = NULL;
11118  		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
11119  			if (jmi->req_pid && jmi->req_pid == pid) {
11120  				jobmgr_log(jmi, LOG_DEBUG, "Found job manager for PID.");
11121  				target = jmi;
11122  				break;
11123  			}
11124  		}
11125  	} else if (type == XPC_SERVICE_TYPE_LAUNCHD || type == XPC_SERVICE_TYPE_APP) {
11126  		target = j->mgr;
11127  	}
11128  
11129  	return target;
11130  }
11131  
/* Handle an XPC_PROCESS_SERVICE_ATTACH request: register the caller as
 * waiting for a new instance of the named service, and report the PID of an
 * already-running instance if one exists.
 *
 * Requires the caller to hold the XPC_SERVICE_ENTITLEMENT_ATTACH entitlement
 * with a literal boolean-true value. Returns 0 with *reply populated, or a
 * POSIX/EX* error code with no reply.
 */
static int
xpc_process_attach(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j) {
		return EINVAL;
	}

	audit_token_t *token = runtime_get_caller_token();
	xpc_object_t entitlement = xpc_copy_entitlement_for_token(XPC_SERVICE_ENTITLEMENT_ATTACH, token);
	if (!entitlement) {
		job_log(j, LOG_ERR, "Job does not have entitlement: %s", XPC_SERVICE_ENTITLEMENT_ATTACH);
		return EPERM;
	}

	/* Identity comparison against the canonical boolean-true singleton; any
	 * other value (including a non-boolean) is rejected.
	 */
	if (entitlement != XPC_BOOL_TRUE) {
		char *desc = xpc_copy_description(entitlement);
		job_log(j, LOG_ERR, "Job has bad value for entitlement: %s:\n%s", XPC_SERVICE_ENTITLEMENT_ATTACH, desc);
		free(desc);

		xpc_release(entitlement);
		return EPERM;
	}

	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
	if (!name) {
		return EXINVAL;
	}

	xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
	if (!type) {
		return EXINVAL;
	}

	// Takes a send right on the caller's new-instance port; the attachment
	// created below assumes ownership of it.
	mach_port_t port = xpc_dictionary_copy_mach_send(request, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT);
	if (!MACH_PORT_VALID(port)) {
		return EXINVAL;
	}

	pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_HANDLE);

	job_log(j, LOG_DEBUG, "Attaching to service: %s", name);

	// NOTE(review): reply2 is not checked for NULL before use — confirm
	// xpc_dictionary_create_reply() cannot fail here.
	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
	if (target) {
		jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
		(void)jobmgr_assumes(target, waiting4attach_new(target, name, port, 0, type));

		/* HACK: This is awful. For legacy reasons, launchd job labels are all
		 * stored in a global namespace, which is stored in the root job
		 * manager. But XPC domains have a per-domain namespace. So if we're
		 * looking for a legacy launchd job, we have to redirect any attachment
		 * attempts to the root job manager to find existing instances.
		 *
		 * But because we store attachments on a per-job manager basis, we have
		 * to create the new attachment in the actual target job manager, hence
		 * why we change the target only after we've created the attachment.
		 */
		if (strcmp(target->name, VPROCMGR_SESSION_AQUA) == 0) {
			target = root_jobmgr;
		}

		job_t existing = job_find(target, name);
		if (existing && existing->p) {
			job_log(existing, LOG_DEBUG, "Found existing instance of service.");
			xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_PID, existing->p);
		} else {
			xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
		}
	} else if (type == XPC_SERVICE_TYPE_BUNDLED) {
		// NOTE(review): target is NULL in this branch — confirm that
		// waiting4attach_new() tolerates a NULL job manager.
		(void)job_assumes(j, waiting4attach_new(target, name, port, pid, type));
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
	} else {
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, EXSRCH);
	}

	*reply = reply2;
	return 0;
}
11211  
11212  static int
11213  xpc_process_detach(job_t j, xpc_object_t request, xpc_object_t *reply __unused)
11214  {
11215  	if (!j) {
11216  		return EINVAL;
11217  	}
11218  
11219  	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
11220  	if (!name) {
11221  		return EXINVAL;
11222  	}
11223  
11224  	xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
11225  	if (!type) {
11226  		return EXINVAL;
11227  	}
11228  
11229  	job_log(j, LOG_DEBUG, "Deatching from service: %s", name);
11230  
11231  	pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_PID);
11232  	jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
11233  	if (target) {
11234  		jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
11235  
11236  		struct waiting4attach *w4ai = NULL;
11237  		struct waiting4attach *w4ait = NULL;
11238  		LIST_FOREACH_SAFE(w4ai, &target->attaches, le, w4ait) {
11239  			if (strcmp(name, w4ai->name) == 0) {
11240  				jobmgr_log(target, LOG_DEBUG, "Found attachment. Deleting.");
11241  				waiting4attach_delete(target, w4ai);
11242  				break;
11243  			}
11244  		}
11245  	}
11246  
11247  	return 0;
11248  }
11249  
11250  static int
11251  xpc_process_get_properties(job_t j, xpc_object_t request, xpc_object_t *reply)
11252  {
11253  	if (j->anonymous) {
11254  		/* Total hack. libxpc will send requests to the pipe created out of the
11255  		 * process' bootstrap port, so when job_mig_intran() tries to resolve
11256  		 * the process into a job, it'll wind up creating an anonymous job if
11257  		 * the requestor was an XPC service, whose job manager is an XPC domain.
11258  		 */
11259  		pid_t pid = j->p;
11260  		jobmgr_t jmi = NULL;
11261  		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
11262  			if ((j = jobmgr_find_by_pid(jmi, pid, false))) {
11263  				break;
11264  			}
11265  		}
11266  	}
11267  
11268  	if (!j || j->anonymous) {
11269  		return EXINVAL;
11270  	}
11271  
11272  	struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
11273  	if (!w4a) {
11274  		return EXINVAL;
11275  	}
11276  
11277  	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
11278  	xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_TYPE, w4a->type);
11279  	xpc_dictionary_set_mach_send(reply2, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT, w4a->port);
11280  	if (j->prog) {
11281  		xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->prog);
11282  	} else {
11283  		xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->argv[0]);
11284  	}
11285  
11286  	if (j->argv) {
11287  		xpc_object_t xargv = xpc_array_create(NULL, 0);
11288  
11289  		size_t i = 0;
11290  		for (i = 0; i < j->argc; i++) {
11291  			if (j->argv[i]) {
11292  				xpc_array_set_string(xargv, XPC_ARRAY_APPEND, j->argv[i]);
11293  			}
11294  		}
11295  
11296  		xpc_dictionary_set_value(reply2, XPC_PROCESS_ROUTINE_KEY_ARGV, xargv);
11297  		xpc_release(xargv);
11298  	}
11299  
11300  	*reply = reply2;
11301  	return 0;
11302  }
11303  
11304  static int
11305  xpc_process_service_kill(job_t j, xpc_object_t request, xpc_object_t *reply)
11306  {
11307  #if XPC_LPI_VERSION >= 20130426
11308  	if (!j) {
11309  		return ESRCH;
11310  	}
11311  
11312  	jobmgr_t jm = _xpc_process_find_target_manager(j, XPC_SERVICE_TYPE_BUNDLED, j->p);
11313  	if (!jm) {
11314  		return ENOENT;
11315  	}
11316  
11317  	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
11318  	if (!name) {
11319  		return EINVAL;
11320  	}
11321  
11322  	int64_t whichsig = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_SIGNAL);
11323  	if (!whichsig) {
11324  		return EINVAL;
11325  	}
11326  
11327  	job_t j2kill = job_find(jm, name);
11328  	if (!j2kill) {
11329  		return ESRCH;
11330  	}
11331  
11332  	if (j2kill->alias) {
11333  		// Only allow for private instances to be killed.
11334  		return EPERM;
11335  	}
11336  
11337  	struct proc_bsdshortinfo proc;
11338  	if (proc_pidinfo(j2kill->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
11339  		if (errno != ESRCH) {
11340  			(void)jobmgr_assumes_zero(root_jobmgr, errno);
11341  		}
11342  
11343  		return errno;
11344  	}
11345  
11346  	struct ldcred *ldc = runtime_get_caller_creds();
11347  	if (proc.pbsi_uid != ldc->euid) {
11348  		// Do not allow non-root to kill RoleAccount services running as a
11349  		// different user.
11350  		return EPERM;
11351  	}
11352  
11353  	if (!j2kill->p) {
11354  		return EALREADY;
11355  	}
11356  
11357  	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
11358  	if (!reply2) {
11359  		return EINVAL;
11360  	}
11361  
11362  	int error = 0;
11363  	int ret = kill(j2kill->p, whichsig);
11364  	if (ret) {
11365  		error = errno;
11366  	}
11367  
11368  	xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
11369  	*reply = reply2;
11370  	return 0;
11371  #else
11372  	return ENOTSUP;
11373  #endif
11374  }
11375  
/* Demultiplex an incoming XPC process-management request on port p.
 *
 * Records the caller's audit credentials, resolves the caller to a job, and
 * dispatches on XPC_PROCESS_ROUTINE_KEY_OP. Returns false only when the
 * request carries no opcode (i.e. it is not a process routine); otherwise
 * returns true, with *reply holding either the handler's reply or an
 * error-bearing reply dictionary.
 */
bool
xpc_process_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	// j may be NULL; each handler below guards for that itself.
	job_t j = job_mig_intran(p);
	job_log(j, LOG_DEBUG, "Incoming XPC process request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_PROCESS_JETSAM_SET_BAND:
		error = xpc_process_set_jetsam_band(j, request, reply);
		break;
	case XPC_PROCESS_JETSAM_SET_MEMORY_LIMIT:
		error = xpc_process_set_jetsam_memory_limit(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_ATTACH:
		error = xpc_process_attach(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_DETACH:
		error = xpc_process_detach(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_GET_PROPERTIES:
		error = xpc_process_get_properties(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_KILL:
		error = xpc_process_service_kill(j, request, reply);
		break;
	default:
		job_log(j, LOG_ERR, "Bogus process opcode.");
		error = EDOM;
	}

	// On handler failure, synthesize a reply carrying the error code.
	// NOTE(review): if creating the reply fails, *reply is set to NULL while
	// still returning true — confirm the transport tolerates a NULL reply.
	if (error) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		if (reply2) {
			xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
		}

		*reply = reply2;
	}

	return true;
}
11427  
11428  kern_return_t
11429  job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
11430  {
11431  	struct ldcred *ldc = runtime_get_caller_creds();
11432  	job_t otherj;
11433  
11434  	if (!j) {
11435  		return BOOTSTRAP_NO_MEMORY;
11436  	}
11437  
11438  	if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
11439  		return BOOTSTRAP_UNKNOWN_SERVICE;
11440  	}
11441  
11442  #if TARGET_OS_EMBEDDED
11443  	bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
11444  #else
11445  	bool allow_non_root_kickstart = false;
11446  #endif
11447  
11448  	if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
11449  		return BOOTSTRAP_NOT_PRIVILEGED;
11450  	}
11451  
11452  #if HAVE_SANDBOX
11453  	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
11454  		return BOOTSTRAP_NOT_PRIVILEGED;
11455  	}
11456  #endif
11457  
11458  	if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
11459  		return BOOTSTRAP_SERVICE_ACTIVE;
11460  	}
11461  
11462  	otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
11463  	otherj = job_dispatch(otherj, true);
11464  
11465  	if (!job_assumes(j, otherj && otherj->p)) {
11466  		// <rdar://problem/6787083> Clear this flag if we failed to start the job.
11467  		otherj->stall_before_exec = false;
11468  		return BOOTSTRAP_NO_MEMORY;
11469  	}
11470  
11471  	*out_pid = otherj->p;
11472  
11473  	return 0;
11474  }
11475  
/* Import and dispatch a job from a packed launch_data dictionary on behalf
 * of the caller (the legacy LaunchServices spawn path).
 *
 * On success returns BOOTSTRAP_SUCCESS with *outj set to the newly spawned
 * job. If the label already exists, returns BOOTSTRAP_NAME_IN_USE with *outj
 * set to the existing job — NOTE(review): *outj may be NULL in that case if
 * the colliding label cannot be resolved (job_assumes below can fail);
 * callers must not dereference it unchecked. May also punt the request to a
 * per-user launchd (VPROC_ERR_TRY_PER_USER) when running as PID 1 on behalf
 * of a non-root caller.
 */
kern_return_t
job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
{
	launch_data_t jobdata = NULL;
	size_t data_offset = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// As PID 1, spawns for non-root users belong in that user's per-user
	// launchd context, not the system context.
	if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	// Spawned jobs are imported into the Aqua session's manager.
	jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
	if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
		jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
		return 1;
	}

	jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);

	launch_data_t label = NULL;
	launch_data_t wait4debugger = NULL;
	if (!jr) {
		switch (errno) {
		case EEXIST:
			/* If EEXIST was returned, we know that there is a label string in
			 * the dictionary. So we don't need to check the types here; that
			 * has already been done.
			 */
			label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
			jr = job_find(NULL, launch_data_get_string(label));
			if (job_assumes(j, jr != NULL) && !jr->p) {
				wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
				if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
					if (launch_data_get_bool(wait4debugger)) {
						/* If the job exists, we're going to kick-start it, but
						 * we need to give the caller the opportunity to start
						 * it suspended if it so desires. But this will only
						 * take effect if the job isn't running.
						 */
						jr->wait4debugger_oneshot = true;
					}
				}
			}

			*outj = jr;
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	if (pid1_magic) {
		jr->mach_uid = ldc->uid;
	}

	// TODO: Consolidate the app and legacy_LS_job bits.
	jr->legacy_LS_job = true;
	jr->abandon_pg = true;
	jr->asport = asport;
	jr->app = true;
	uuid_clear(jr->expected_audit_uuid);
	jr = job_dispatch(jr, true);

	if (!job_assumes(j, jr != NULL)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
	*outj = jr;

	return BOOTSTRAP_SUCCESS;
}
11579  
11580  kern_return_t
11581  job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
11582  {
11583  	job_t nj = NULL;
11584  	kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
11585  	if (likely(kr == KERN_SUCCESS)) {
11586  		if (job_setup_exit_port(nj) != KERN_SUCCESS) {
11587  			job_remove(nj);
11588  			kr = BOOTSTRAP_NO_MEMORY;
11589  		} else {
11590  			/* Do not return until the job has called exec(3), thereby making it
11591  			 * safe for the caller to send it SIGCONT.
11592  			 *
11593  			 * <rdar://problem/9042798>
11594  			 */
11595  			nj->spawn_reply_port = rp;
11596  			kr = MIG_NO_REPLY;
11597  		}
11598  	} else if (kr == BOOTSTRAP_NAME_IN_USE) {
11599  		bool was_running = nj->p;
11600  		if (job_dispatch(nj, true)) {
11601  			if (!was_running) {
11602  				job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");
11603  
11604  				if (job_setup_exit_port(nj) == KERN_SUCCESS) {
11605  					nj->spawn_reply_port = rp;
11606  					kr = MIG_NO_REPLY;
11607  				} else {
11608  					kr = BOOTSTRAP_NO_MEMORY;
11609  				}
11610  			} else {
11611  				*obsvr_port = MACH_PORT_NULL;
11612  				*child_pid = nj->p;
11613  				kr = KERN_SUCCESS;
11614  			}
11615  		} else {
11616  			job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
11617  			kr = BOOTSTRAP_UNKNOWN_SERVICE;
11618  		}
11619  	}
11620  
11621  	mig_deallocate(indata, indataCnt);
11622  	return kr;
11623  }
11624  
11625  launch_data_t
11626  job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport __attribute__((unused)))
11627  {
11628  	launch_data_t reply = NULL;
11629  
11630  	errno = ENOTSUP;
11631  	if (launch_data_get_type(request) == LAUNCH_DATA_STRING) {
11632  		if (strcmp(launch_data_get_string(request), LAUNCH_KEY_CHECKIN) == 0) {
11633  			reply = job_export(j);
11634  			job_checkin(j);
11635  		}
11636  	}
11637  
11638  	return reply;
11639  }
11640  
#define LAUNCHD_MAX_LEGACY_FDS 128
/* Element count of a true array. The argument is fully parenthesized so that
 * expressions like countof(p->fds) expand with the intended precedence
 * (previously x[0] was expanded unparenthesized). Only pass real arrays —
 * a pointer argument silently yields the wrong answer.
 */
#define countof(x) (sizeof((x)) / sizeof((x)[0]))
11643  
11644  kern_return_t
11645  job_mig_legacy_ipc_request(job_t j, vm_offset_t request, 
11646  	mach_msg_type_number_t requestCnt, mach_port_array_t request_fds,
11647  	mach_msg_type_number_t request_fdsCnt, vm_offset_t *reply,
11648  	mach_msg_type_number_t *replyCnt, mach_port_array_t *reply_fdps,
11649  	mach_msg_type_number_t *reply_fdsCnt, mach_port_t asport)
11650  {
11651  	if (!j) {
11652  		return BOOTSTRAP_NO_MEMORY;
11653  	}
11654  
11655  	/* TODO: Once we support actions other than checking in, we must check the
11656  	 * sandbox capabilities and EUID of the requestort.
11657  	 */
11658  	size_t nout_fdps = 0;
11659  	size_t nfds = request_fdsCnt / sizeof(request_fds[0]);
11660  	if (nfds > LAUNCHD_MAX_LEGACY_FDS) {
11661  		job_log(j, LOG_ERR, "Too many incoming descriptors: %lu", nfds);
11662  		return BOOTSTRAP_NO_MEMORY;
11663  	}
11664  
11665  	int in_fds[LAUNCHD_MAX_LEGACY_FDS];
11666  	size_t i = 0;
11667  	for (i = 0; i < nfds; i++) {
11668  		in_fds[i] = fileport_makefd(request_fds[i]);
11669  		if (in_fds[i] == -1) {
11670  			job_log(j, LOG_ERR, "Bad descriptor passed in legacy IPC request at index: %lu", i);
11671  		}
11672  	}
11673  
11674  	// DON'T goto outbad before this point.
11675  	*reply = 0;
11676  	*reply_fdps = NULL;
11677  	launch_data_t ldreply = NULL;
11678  
11679  	size_t dataoff = 0;
11680  	size_t fdoff = 0;
11681  	launch_data_t ldrequest = launch_data_unpack((void *)request, requestCnt, in_fds, nfds, &dataoff, &fdoff);
11682  	if (!ldrequest) {
11683  		job_log(j, LOG_ERR, "Invalid legacy IPC request passed.");
11684  		goto out_bad;
11685  	}
11686  
11687  	ldreply = job_do_legacy_ipc_request(j, ldrequest, asport);
11688  	if (!ldreply) {
11689  		ldreply = launch_data_new_errno(errno);
11690  		if (!ldreply) {
11691  			goto out_bad;
11692  		}
11693  	}
11694  
11695  	*replyCnt = 10 * 1024 * 1024;
11696  	mig_allocate(reply, *replyCnt);
11697  	if (!*reply) {
11698  		goto out_bad;
11699  	}
11700  
11701  	int out_fds[LAUNCHD_MAX_LEGACY_FDS];
11702  	size_t nout_fds = 0;
11703  	size_t sz = launch_data_pack(ldreply, (void *)*reply, *replyCnt, out_fds, &nout_fds);
11704  	if (!sz) {
11705  		job_log(j, LOG_ERR, "Could not pack legacy IPC reply.");
11706  		goto out_bad;
11707  	}
11708  
11709  	if (nout_fds) {
11710  		if (nout_fds > 128) {
11711  			job_log(j, LOG_ERR, "Too many outgoing descriptors: %lu", nout_fds);
11712  			goto out_bad;
11713  		}
11714  
11715  		*reply_fdsCnt = nout_fds * sizeof((*reply_fdps)[0]);
11716  		mig_allocate((vm_address_t *)reply_fdps, *reply_fdsCnt);
11717  		if (!*reply_fdps) {
11718  			goto out_bad;
11719  		}
11720  
11721  		for (i = 0; i < nout_fds; i++) {
11722  			mach_port_t fp = MACH_PORT_NULL;
11723  			/* Whatever. Worst case is that we insert MACH_PORT_NULL. Not a big
11724  			 * deal. Note, these get stuffed into an array whose disposition is
11725  			 * mach_port_move_send_t, so we don't have to worry about them after
11726  			 * returning.
11727  			 */
11728  			if (fileport_makeport(out_fds[i], &fp) != 0) {
11729  				job_log(j, LOG_ERR, "Could not pack response descriptor at index: %lu: %d: %s", i, errno, strerror(errno));
11730  			}
11731  			(*reply_fdps)[i] = fp;
11732  		}
11733  
11734  		nout_fdps = nout_fds;
11735  	} else {
11736  		*reply_fdsCnt = 0;
11737  	}
11738  
11739  	mig_deallocate(request, requestCnt);
11740  	launch_data_free(ldreply);
11741  	ldreply = NULL;
11742  
11743  	// Unused for now.
11744  	(void)launchd_mport_deallocate(asport);
11745  
11746  	return BOOTSTRAP_SUCCESS;
11747  
11748  out_bad:
11749  	for (i = 0; i < nfds; i++) {
11750  		(void)close(in_fds[i]);
11751  	}
11752  
11753  	for (i = 0; i < nout_fds; i++) {
11754  		(void)launchd_mport_deallocate((*reply_fdps)[i]);
11755  	}
11756  
11757  	if (*reply) {
11758  		mig_deallocate(*reply, *replyCnt);
11759  	}
11760  
11761  	/* We should never hit this since the last goto out is in the case that
11762  	 * allocating this fails.
11763  	 */
11764  	if (*reply_fdps) {
11765  		mig_deallocate((vm_address_t)*reply_fdps, *reply_fdsCnt);
11766  	}
11767  
11768  	if (ldreply) {
11769  		launch_data_free(ldreply);
11770  	}
11771  
11772  	return BOOTSTRAP_NO_MEMORY;
11773  }
11774  
/* One-time startup: create the root job manager and the com.apple.xpc.system
 * singleton domain. sflag indicates a single-user-mode boot and is passed
 * through to the root manager.
 */
void
jobmgr_init(bool sflag)
{
	const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
	SLIST_INIT(&s_curious_jobs);
	LIST_INIT(&s_needing_sessions);

	// Failure to create either manager is fatal at boot.
	os_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
	os_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
	_s_xpc_system_domain->req_asid = launchd_audit_session;
	_s_xpc_system_domain->req_asport = launchd_audit_port;
	_s_xpc_system_domain->shortdesc = "system";
	if (pid1_magic) {
		root_jobmgr->monitor_shutdown = true;
	}

	// Hold a descriptor open on /dev/autofs_nowait — presumably so launchd's
	// own path lookups never block on autofs triggers (the name and the
	// s_no_hang_fd variable suggest this; confirm against autofs docs).
	uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
	s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
	// NOTE(review): likely() on the failure case reads inverted — confirm
	// whether unlikely() was intended here.
	if (likely(s_no_hang_fd == -1)) {
		if (jobmgr_assumes_zero_p(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK))) != -1) {
			(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr));
		}
	}
	s_no_hang_fd = _fd(s_no_hang_fd);
}
11800  
/* djb2 string hash, first reported by Dan Bernstein in comp.lang.c:
 * start from 5381 and fold each byte in as hash = hash * 33 + c.
 */
size_t
our_strhash(const char *s)
{
	size_t hash = 5381;

	for (size_t c = *s; c != 0; c = *++s) {
		hash = (hash << 5) + hash + c; // hash*33 + c
	}

	return hash;
}
11816  
/* Bucket index for a job label in the label hash table. */
size_t
hash_label(const char *label)
{
	return our_strhash(label) % LABEL_HASH_SIZE;
}
11822  
/* Bucket index for a Mach service name in the machservice hash table. */
size_t
hash_ms(const char *msstr)
{
	return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
}
11828  
11829  bool
11830  waiting4removal_new(job_t j, mach_port_t rp)
11831  {
11832  	struct waiting_for_removal *w4r;
11833  
11834  	if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
11835  		return false;
11836  	}
11837  
11838  	w4r->reply_port = rp;
11839  
11840  	SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
11841  
11842  	return true;
11843  }
11844  
/* Answer and discard one removal watcher: send the (successful) signal reply
 * to the stored port, unlink the record from the job, and free it.
 */
void
waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
{
	// Reply before unlinking so the watcher is always answered exactly once.
	(void)job_assumes_zero(j, job_mig_send_signal_reply(w4r->reply_port, 0));

	SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);

	free(w4r);
}
11854  
11855  size_t
11856  get_kern_max_proc(void)
11857  {
11858  	int mib[] = { CTL_KERN, KERN_MAXPROC };
11859  	int max = 100;
11860  	size_t max_sz = sizeof(max);
11861  
11862  	(void)posix_assumes_zero(sysctl(mib, 2, &max, &max_sz, NULL, 0));
11863  
11864  	return max;
11865  }
11866  
11867  // See rdar://problem/6271234
11868  void
11869  eliminate_double_reboot(void)
11870  {
11871  	if (unlikely(!pid1_magic)) {
11872  		return;
11873  	}
11874  
11875  	struct stat sb;
11876  	const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
11877  	int result = -1;
11878  
11879  	if (unlikely(stat(argv[1], &sb) != -1)) {
11880  		jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
11881  
11882  		pid_t p = 0;
11883  		result = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ);
11884  		if (result == -1) {
11885  			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script: %d: %s", result, strerror(result));
11886  			goto out;
11887  		}
11888  
11889  		int wstatus = 0;
11890  		result = waitpid(p, &wstatus, 0);
11891  		if (result == -1) {
11892  			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to reap deferred install script: %d: %s", errno, strerror(errno));
11893  			goto out;
11894  		}
11895  
11896  		if (WIFEXITED(wstatus)) {
11897  			if ((result = WEXITSTATUS(wstatus)) == 0) {
11898  				jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
11899  			} else {
11900  				jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script failed with status: %d", WEXITSTATUS(wstatus));
11901  			}
11902  		} else {
11903  			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Weirdness with install script: %d", wstatus);
11904  		}
11905  	}
11906  out:
11907  	if (result == 0) {
11908  		/* If the unlink(2) was to fail, it would be most likely fail with
11909  		 * EBUSY. All the other failure cases for unlink(2) don't apply when
11910  		 * we're running under PID 1 and have verified that the file exists.
11911  		 * Outside of someone deliberately messing with us (like if
11912  		 * /etc/rc.deferredinstall is actually a looping sym-link or a mount
11913  		 * point for a filesystem) and I/O errors, we should be good.
11914  		 */
11915  		if (unlink(argv[1]) == -1) {
11916  			jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to remove deferred install script: %d: %s", errno, strerror(errno));
11917  		}
11918  	}
11919  }
11920  
/* Apply one key from a Jetsam-properties dictionary to job j.
 *
 * Recognized keys set the job's Jetsam priority or memory limit; several
 * SpringBoard-originated keys are deliberately accepted and ignored so their
 * presence does not generate error logs. Unknown keys are logged.
 */
void
jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
{
	job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
	if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
		j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);

#if XPC_LPI_VERSION >= 20120810
		// Priorities in the reserved XPC range encode a Jetsam band; map the
		// band back to its real priority via the platform table.
		if (j->jetsam_priority > XPC_JETSAM_PRIORITY_RESERVED && j->jetsam_priority < XPC_JETSAM_PRIORITY_RESERVED + XPC_JETSAM_BAND_LAST) {
			size_t band = j->jetsam_priority - XPC_JETSAM_PRIORITY_RESERVED;
			j->jetsam_priority = _launchd_priority_map[band - 1].priority;
		}
#endif
		job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
		j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
		job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMITBACKGROUND) == 0) {
		j->jetsam_memory_limit_background = true;
		job_log(j, LOG_DEBUG, "Memory limit is for background state only");
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
		/* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request. 
		 * You can't set this in a plist.
		 */
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMACTIVE) == 0) {
		// Ignore.
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
		/* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
		 * complain about it.
		 */
	} else {
		job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
	}

	// Any recognized-or-not Jetsam key marks the job as having Jetsam
	// properties.
	if (unlikely(!j->jetsam_properties)) {
		j->jetsam_properties = true;
	}
}
11959  
/* Update job j's Jetsam priority from an XPC band and push the new priority
 * (plus opaque user_data) into the kernel via memorystatus_control(2).
 * Compiled out on non-embedded platforms.
 */
void
job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data)
{
#if TARGET_OS_EMBEDDED
	// Bands are 1-based; map to the platform priority table.
	j->jetsam_priority = _launchd_priority_map[band - 1].priority;
	j->jetsam_properties = true;

	memorystatus_priority_properties_t mjp;
	mjp.priority = j->jetsam_priority;
	mjp.user_data = user_data;

	size_t size = sizeof(mjp);
	int r = memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, j->p, 0, &mjp, size);
	// ESRCH just means the process already exited; anything else is logged.
	if (r == -1 && errno != ESRCH) {
		(void)job_assumes_zero(j, errno);
	}
#else
#pragma unused(j, band, user_data)
#endif
}
11980  
/* Record and apply a new Jetsam high-water-mark memory limit for job j via
 * memorystatus_control(2). Compiled out on non-embedded platforms.
 */
void
job_update_jetsam_memory_limit(job_t j, int32_t limit)
{
#if TARGET_OS_EMBEDDED
	j->jetsam_memlimit = limit;
	j->jetsam_properties = true;

	int r = memorystatus_control(MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK, j->p, limit, NULL, 0);
	// ESRCH just means the process already exited; anything else is logged.
	if (r == -1 && errno != ESRCH) {
		(void)job_assumes_zero(j, errno);
	}
#else
#pragma unused(j, limit)
#endif
}