/* src/launchd/src/runtime.c */
   1  /*
   2   * Copyright (c) 1999-2008 Apple Computer, Inc. All rights reserved.
   3   *
   4   * @APPLE_APACHE_LICENSE_HEADER_START@
   5   * 
   6   * Licensed under the Apache License, Version 2.0 (the "License");
   7   * you may not use this file except in compliance with the License.
   8   * You may obtain a copy of the License at
   9   * 
  10   *     http://www.apache.org/licenses/LICENSE-2.0
  11   * 
  12   * Unless required by applicable law or agreed to in writing, software
  13   * distributed under the License is distributed on an "AS IS" BASIS,
  14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15   * See the License for the specific language governing permissions and
  16   * limitations under the License.
  17   * 
  18   * @APPLE_APACHE_LICENSE_HEADER_END@
  19   */
  20  
  21  #include "config.h"
  22  #include "runtime.h"
  23  
  24  #include <mach/mach.h>
  25  #include <mach/mach_error.h>
  26  #include <mach/boolean.h>
  27  #include <mach/message.h>
  28  #include <mach/notify.h>
  29  #include <mach/mig_errors.h>
  30  #include <mach/mach_traps.h>
  31  #include <mach/mach_interface.h>
  32  #include <mach/host_info.h>
  33  #include <mach/mach_host.h>
  34  #include <mach/mach_time.h>
  35  #include <mach/exception.h>
  36  #include <sys/types.h>
  37  #include <sys/stat.h>
  38  #include <sys/sysctl.h>
  39  #include <sys/time.h>
  40  #include <sys/proc.h>
  41  #include <sys/proc_info.h>
  42  #include <libproc.h>
  43  #include <sys/event.h>
  44  #include <sys/queue.h>
  45  #include <sys/socket.h>
  46  #include <sys/mount.h>
  47  #include <sys/reboot.h>
  48  #include <sys/fcntl.h>
  49  #include <sys/kdebug.h>
  50  #include <bsm/libbsm.h>
  51  #include <malloc/malloc.h>
  52  #include <unistd.h>
  53  #include <pthread.h>
  54  #include <errno.h>
  55  #include <string.h>
  56  #include <ctype.h>
  57  #include <stdio.h>
  58  #include <stdlib.h>
  59  #include <stdbool.h>
  60  #include <syslog.h>
  61  #include <signal.h>
  62  #include <dlfcn.h>
  63  #include <os/assumes.h>
  64  
  65  #include "internalServer.h"
  66  #include "internal.h"
  67  #include "notifyServer.h"
  68  #include "mach_excServer.h"
  69  
  70  /* We shouldn't be including these */
  71  #include "launch.h"
  72  #include "launchd.h"
  73  #include "core.h"
  74  #include "vproc.h"
  75  #include "vproc_priv.h"
  76  #include "vproc_internal.h"
  77  #include "jobServer.h"
  78  #include "job_reply.h"
  79  
  80  #include <xpc/launchd.h>
  81  
/* Port set for receive rights that have a registered MIG demux routine. */
static mach_port_t ipc_port_set;
/* Port set watched via EVFILT_MACHPORT for on-demand service ports. */
static mach_port_t demand_port_set;
/* Receive right for launchd's own internal MIG subsystems. */
static mach_port_t launchd_internal_port;
/* The main kqueue; always drained on the helper thread (kqueue_demand_loop). */
static int mainkq;

/* Maximum number of kevents pulled from the kqueue in one batch. */
#define BULK_KEV_MAX 100
/* Batch currently being dispatched by x_handle_kqueue (NULL otherwise);
 * kevent_mod() consults these to prune not-yet-dispatched events.
 */
static struct kevent *bulk_kev;
static int bulk_kev_i;   /* index of the kevent currently being dispatched */
static int bulk_kev_cnt; /* number of kevents in the current batch */

static pthread_t kqueue_demand_thread;

static void mportset_callback(void);
static kq_callback kqmportset_callback = (kq_callback)mportset_callback;
static void *kqueue_demand_loop(void *arg);

boolean_t launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply);
static void launchd_runtime2(mach_msg_size_t msg_size);
/* Largest request/reply size among the MIG subsystems considered in
 * launchd_runtime_init(); used to size receive buffers.
 */
static mach_msg_size_t max_msg_size;
/* Demux callbacks indexed by MACH_PORT_INDEX(receive right); grown on
 * demand by runtime_add_mport().
 */
static mig_callback *mig_cb_table;
static size_t mig_cb_table_sz;
static timeout_callback runtime_idle_callback;
static mach_msg_timeout_t runtime_idle_timeout;
/* Credentials of the most recent MIG caller; filled in by
 * runtime_record_caller_creds() from the message's audit trailer.
 */
static struct ldcred ldc;
static audit_token_t ldc_token;
static size_t runtime_standby_cnt;

static void do_file_init(void) __attribute__((constructor));
/* Mach timebase info cached for absolute-time conversions. */
static mach_timebase_info_data_t tbi;
static uint64_t tbi_safe_math_max;
static uint64_t time_of_mach_msg_return;
static double tbi_float_val;

/* Signals launchd keeps ignored while running; runtime_fork() resets them
 * to SIG_DFL in the child.
 * NOTE(review): SIGTSTP appears twice and SIGSTOP is absent (SIGSTOP cannot
 * be caught or ignored) -- looks like an upstream quirk; confirm intent.
 */
static const int sigigns[] = { SIGHUP, SIGINT, SIGPIPE, SIGALRM, SIGTERM,
	SIGURG, SIGTSTP, SIGTSTP, SIGCONT, SIGTTIN, SIGTTOU, SIGIO, SIGXCPU,
	SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGINFO, SIGUSR1, SIGUSR2
};
static sigset_t sigign_set;
bool pid1_magic;
bool launchd_apple_internal;
bool launchd_flat_mach_namespace = true;
bool launchd_malloc_log_stacks = false;
bool launchd_use_gmalloc = false;
bool launchd_log_per_user_shutdown = false;
#if !TARGET_OS_EMBEDDED
bool launchd_log_shutdown = true;
#else
bool launchd_log_shutdown = false;
#endif
bool launchd_log_perf = false;
bool launchd_log_debug = false;
bool launchd_trap_sigkill_bugs = false;
bool launchd_no_jetsam_perm_check = false;
bool launchd_osinstaller = false;
bool launchd_allow_global_dyld_envvars = false;
#if TARGET_OS_EMBEDDED
bool launchd_appletv = false;
#endif
pid_t launchd_wsp = 0;
size_t runtime_busy_cnt;

/* Configuration files live at the filesystem root on embedded platforms. */
#if TARGET_OS_EMBEDDED
#define LAUNCHD_CONFIG_PREFIX "/"
#else
#define LAUNCHD_CONFIG_PREFIX "/private/var/db/"
#endif

/* True when the named config file exists under the platform prefix. */
#define config_check(s, sb) (stat(LAUNCHD_CONFIG_PREFIX s, &sb) == 0)
 150  
 151  mach_port_t
 152  runtime_get_kernel_port(void)
 153  {
 154  	return launchd_internal_port;
 155  }
 156  
/* Size-probe unions: sizeof() each union yields the largest request or
 * reply message in the corresponding MIG subsystem. They are never
 * instantiated -- only used to size receive buffers.
 */
union vproc_mig_max_sz {
	union __RequestUnion__job_mig_job_subsystem req;
	union __ReplyUnion__job_mig_job_subsystem rep;
};

union internal_max_sz {
	union __RequestUnion__x_internal_subsystem req;
	union __ReplyUnion__x_internal_subsystem rep;
};

/* NOTE(review): when DARLING is defined this union has no members, which is
 * a GNU extension (sizeof == 0) rather than strict ISO C -- confirm that is
 * intended for the Darling build.
 */
union xpc_domain_max_sz {
#ifndef DARLING
	union __RequestUnion__xpc_domain_xpc_domain_subsystem req;
	union __ReplyUnion__xpc_domain_xpc_domain_subsystem rep;
#endif // DARLING
};

union mach_exc_max_sz {
	union __RequestUnion__catch_mach_exc_subsystem req;
	union __ReplyUnion__catch_mach_exc_subsystem rep;
};

union do_notify_max_sz {
	union __RequestUnion__do_notify_subsystem req;
	union __ReplyUnion__do_notify_subsystem rep;
};
 183  
/* One-time runtime bring-up: create the main kqueue, the demand and IPC
 * port sets, the internal MIG port, and the helper thread that drains the
 * kqueue. Any failure here is fatal (asserted).
 */
void
launchd_runtime_init(void)
{
	pid_t p = getpid();

	(void)posix_assert_zero((mainkq = kqueue()));

	os_assert_zero(mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &demand_port_set));
	os_assert_zero(mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &ipc_port_set));
	/* Fire mportset_callback whenever a demand-set port has a queued message. */
	posix_assert_zero(kevent_mod(demand_port_set, EVFILT_MACHPORT, EV_ADD, 0, 0, &kqmportset_callback));

	os_assert_zero(launchd_mport_create_recv(&launchd_internal_port));
	os_assert_zero(launchd_mport_make_send(launchd_internal_port));

	/* NOTE(review): only the vproc and xpc_domain subsystems are considered
	 * here; the internal/mach_exc/do_notify size unions declared above are
	 * not -- presumably they are known to be smaller. Confirm.
	 */
	max_msg_size = sizeof(union vproc_mig_max_sz);
	if (sizeof(union xpc_domain_max_sz) > max_msg_size) {
		max_msg_size = sizeof(union xpc_domain_max_sz);
	}

	os_assert_zero(runtime_add_mport(launchd_internal_port, launchd_internal_demux));
	os_assert_zero(pthread_create(&kqueue_demand_thread, NULL, kqueue_demand_loop, NULL));
	os_assert_zero(pthread_detach(kqueue_demand_thread));

	/* Opt this process out of hanging on unresponsive remote filesystems. */
	(void)posix_assumes_zero(sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &p, sizeof(p)));
}
 209  
 210  void
 211  launchd_runtime_init2(void)
 212  {
 213  	size_t i;
 214  
 215  	__OS_COMPILETIME_ASSERT__(SIG_ERR == (typeof(SIG_ERR))-1);
 216  	for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
 217  		sigaddset(&sigign_set, sigigns[i]);
 218  		(void)posix_assumes_zero(signal(sigigns[i], SIG_IGN));
 219  	}
 220  }
 221  
/* Append the symbolic name of flag f to flags_off and clear it from flags.
 * Deliberately brace-less so the FLAGIF(...) else FLAGIF(...) chains below
 * parse as one if/else-if ladder. Also used by log_kevent_struct().
 */
#define FLAGIF(f) if (flags & f) { flags_off += sprintf(flags_off, #f); flags &= ~f; }
/* Render reboot(2) flag bits as a '|'-separated string of RB_* names.
 * Returns a pointer to a static buffer (not thread-safe); unknown bits are
 * appended as one hex literal. MAX_RB_STR's trailing fake token reserves
 * space for that hex fallback.
 */
const char *
reboot_flags_to_C_names(unsigned int flags)
{
#define MAX_RB_STR "RB_ASKNAME|RB_SINGLE|RB_NOSYNC|RB_HALT|RB_INITNAME|RB_DFLTROOT|RB_ALTBOOT|RB_UNIPROC|RB_SAFEBOOT|RB_UPSDELAY|0xdeadbeeffeedface"
	static char flags_buf[sizeof(MAX_RB_STR)];
	char *flags_off = NULL;

	if (flags == 0) {
		return "RB_AUTOBOOT";
	}

	/* Each pass names exactly one flag; flags_off is NULL only on the
	 * first pass, so '|' separators are written between names.
	 */
	while (flags) {
		if (flags_off) {
			*flags_off = '|';
			flags_off++;
			*flags_off = '\0';
		} else {
			flags_off = flags_buf;
		}

		FLAGIF(RB_ASKNAME)
		else FLAGIF(RB_SINGLE)
		else FLAGIF(RB_NOSYNC)
		else FLAGIF(RB_HALT)
		else FLAGIF(RB_INITNAME)
		else FLAGIF(RB_DFLTROOT)
		else FLAGIF(RB_ALTBOOT)
		else FLAGIF(RB_UNIPROC)
		else FLAGIF(RB_SAFEBOOT)
		else FLAGIF(RB_UPSDELAY)
		else {
			/* Unknown bits: dump them all as hex and stop. */
			flags_off += sprintf(flags_off, "0x%x", flags);
			flags = 0;
		}
	}

	return flags_buf;
}
 261  
 262  const char *
 263  signal_to_C_name(unsigned int sig)
 264  {
 265  	static char unknown[25];
 266  
 267  #define SIG2CASE(sg)	case sg: return #sg
 268  
 269  	switch (sig) {
 270  	SIG2CASE(SIGHUP);
 271  	SIG2CASE(SIGINT);
 272  	SIG2CASE(SIGQUIT);
 273  	SIG2CASE(SIGILL);
 274  	SIG2CASE(SIGTRAP);
 275  	SIG2CASE(SIGABRT);
 276  	SIG2CASE(SIGFPE);
 277  	SIG2CASE(SIGKILL);
 278  	SIG2CASE(SIGBUS);
 279  	SIG2CASE(SIGSEGV);
 280  	SIG2CASE(SIGSYS);
 281  	SIG2CASE(SIGPIPE);
 282  	SIG2CASE(SIGALRM);
 283  	SIG2CASE(SIGTERM);
 284  	SIG2CASE(SIGURG);
 285  	SIG2CASE(SIGSTOP);
 286  	SIG2CASE(SIGTSTP);
 287  	SIG2CASE(SIGCONT);
 288  	SIG2CASE(SIGCHLD);
 289  	SIG2CASE(SIGTTIN);
 290  	SIG2CASE(SIGTTOU);
 291  	SIG2CASE(SIGIO);
 292  	SIG2CASE(SIGXCPU);
 293  	SIG2CASE(SIGXFSZ);
 294  	SIG2CASE(SIGVTALRM);
 295  	SIG2CASE(SIGPROF);
 296  	SIG2CASE(SIGWINCH);
 297  	SIG2CASE(SIGINFO);
 298  	SIG2CASE(SIGUSR1);
 299  	SIG2CASE(SIGUSR2);
 300  	default:
 301  		snprintf(unknown, sizeof(unknown), "%u", sig);
 302  		return unknown;
 303  	}
 304  }
 305  
/* Log one kevent from kev_base[indx] at the given syslog level, decoding
 * flags and per-filter fflags into human-readable '|'-separated names.
 * No-op unless the level includes LOG_DEBUG. Relies on the FLAGIF macro
 * defined above reboot_flags_to_C_names().
 */
void
log_kevent_struct(int level, struct kevent *kev_base, int indx)
{
	struct kevent *kev = &kev_base[indx];
	const char *filter_str;
	char ident_buf[100];
	char filter_buf[100];
	char fflags_buf[1000];
	char flags_buf[1000] = "0x0";
	char *flags_off = NULL;
	char *fflags_off = NULL;
	unsigned short flags = kev->flags;
	unsigned int fflags = kev->fflags;

	/* Only emit when debug logging is in effect (console bit masked off). */
	if (likely(!(LOG_MASK(level & ~LOG_CONSOLE) & LOG_DEBUG))) {
		return;
	}

	/* Decode EV_* flags; one name is consumed per pass, '|' between names. */
	if (flags) while (flags) {
		if (flags_off) {
			*flags_off = '|';
			flags_off++;
			*flags_off = '\0';
		} else {
			flags_off = flags_buf;
		}

		FLAGIF(EV_ADD)
		else FLAGIF(EV_RECEIPT)
		else FLAGIF(EV_DELETE)
		else FLAGIF(EV_ENABLE)
		else FLAGIF(EV_DISABLE)
		else FLAGIF(EV_CLEAR)
		else FLAGIF(EV_EOF)
		else FLAGIF(EV_ONESHOT)
		else FLAGIF(EV_ERROR)
		else {
			/* Unknown bits: dump as hex and stop. */
			flags_off += sprintf(flags_off, "0x%hx", flags);
			flags = 0;
		}
	}

	/* Defaults; overridden per filter below where a better rendering exists. */
	snprintf(ident_buf, sizeof(ident_buf), "%ld", kev->ident);
	snprintf(fflags_buf, sizeof(fflags_buf), "0x%x", fflags);

	switch (kev->filter) {
	case EVFILT_READ:
		filter_str = "EVFILT_READ";
		break;
	case EVFILT_WRITE:
		filter_str = "EVFILT_WRITE";
		break;
	case EVFILT_AIO:
		filter_str = "EVFILT_AIO";
		break;
	case EVFILT_VNODE:
		filter_str = "EVFILT_VNODE";
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

/* Same pattern as FLAGIF, but for the fflags field. */
#define FFLAGIF(ff) if (fflags & ff) { fflags_off += sprintf(fflags_off, #ff); fflags &= ~ff; }

			FFLAGIF(NOTE_DELETE)
			else FFLAGIF(NOTE_WRITE)
			else FFLAGIF(NOTE_EXTEND)
			else FFLAGIF(NOTE_ATTRIB)
			else FFLAGIF(NOTE_LINK)
			else FFLAGIF(NOTE_RENAME)
			else FFLAGIF(NOTE_REVOKE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_PROC:
		filter_str = "EVFILT_PROC";
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(NOTE_EXIT)
			else FFLAGIF(NOTE_REAP)
			else FFLAGIF(NOTE_FORK)
			else FFLAGIF(NOTE_EXEC)
			else FFLAGIF(NOTE_SIGNAL)
			else FFLAGIF(NOTE_TRACK)
			else FFLAGIF(NOTE_TRACKERR)
			else FFLAGIF(NOTE_CHILD)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_SIGNAL:
		filter_str = "EVFILT_SIGNAL";
		strcpy(ident_buf, signal_to_C_name(kev->ident));
		break;
	case EVFILT_TIMER:
		filter_str = "EVFILT_TIMER";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(NOTE_SECONDS)
			else FFLAGIF(NOTE_USECONDS)
			else FFLAGIF(NOTE_NSECONDS)
			else FFLAGIF(NOTE_ABSOLUTE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	case EVFILT_MACHPORT:
		filter_str = "EVFILT_MACHPORT";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		break;
	case EVFILT_FS:
		filter_str = "EVFILT_FS";
		snprintf(ident_buf, sizeof(ident_buf), "0x%lx", kev->ident);
		if (fflags) while (fflags) {
			if (fflags_off) {
				*fflags_off = '|';
				fflags_off++;
				*fflags_off = '\0';
			} else {
				fflags_off = fflags_buf;
			}

			FFLAGIF(VQ_NOTRESP)
			else FFLAGIF(VQ_NEEDAUTH)
			else FFLAGIF(VQ_LOWDISK)
			else FFLAGIF(VQ_MOUNT)
			else FFLAGIF(VQ_UNMOUNT)
			else FFLAGIF(VQ_DEAD)
			else FFLAGIF(VQ_ASSIST)
			else FFLAGIF(VQ_NOTRESPLOCK)
			else FFLAGIF(VQ_UPDATE)
			else {
				fflags_off += sprintf(fflags_off, "0x%x", fflags);
				fflags = 0;
			}
		}
		break;
	default:
		/* Unrecognized filter: log its numeric value. */
		snprintf(filter_buf, sizeof(filter_buf), "%hd", kev->filter);
		filter_str = filter_buf;
		break;
	}

	launchd_syslog(level, "KEVENT[%d]: udata = %p data = 0x%lx ident = %s filter = %s flags = %s fflags = %s",
			indx, kev->udata, kev->data, ident_buf, filter_str, flags_buf, fflags_buf);
}
 478  
/* Invoked when the demand port set has at least one queued message. Scans
 * the set's members, finds the first port with pending messages, and fires
 * the callback of the job registered for it.
 */
void
mportset_callback(void)
{
	mach_port_name_array_t members;
	mach_msg_type_number_t membersCnt;
	mach_port_status_t status;
	mach_msg_type_number_t statusCnt;
	struct kevent kev;
	unsigned int i;

	if (os_assumes_zero(mach_port_get_set_status(mach_task_self(), demand_port_set, &members, &membersCnt)) != 0) {
		return;
	}

	for (i = 0; i < membersCnt; i++) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), members[i], MACH_PORT_RECEIVE_STATUS, (mach_port_info_t)&status,
					&statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			/* A synthetic kevent carries the job lookup result in udata. */
			EV_SET(&kev, members[i], EVFILT_MACHPORT, 0, 0, 0, job_find_by_service_port(members[i]));
			/* NOTE(review): the disabled guard below used to skip NULL
			 * udata; as written, a NULL from job_find_by_service_port()
			 * would be dereferenced here -- confirm that cannot happen.
			 */
#if 0
			if (kev.udata != NULL) {
#endif
				log_kevent_struct(LOG_DEBUG, &kev, 0);
				(*((kq_callback *)kev.udata))(kev.udata, &kev);
#if 0
			} else {
				log_kevent_struct(LOG_ERR, &kev, 0);
			}
#endif
			/* the callback may have tainted our ability to continue this for loop */
			break;
		}
	}

	/* mach_port_get_set_status() vm_allocate()s the member array; free it. */
	(void)os_assumes_zero(vm_deallocate(mach_task_self(), (vm_address_t)members, (vm_size_t) membersCnt * sizeof(mach_port_name_t)));
}
 518  
/* Helper-thread body: blocks in select() on the main kqueue's fd and, when
 * it becomes readable, asks the main thread (via the internal MIG port) to
 * drain it. Never returns in practice.
 */
void *
kqueue_demand_loop(void *arg __attribute__((unused)))
{
	fd_set rfds;

	/*
	 * Yes, at first glance, calling select() on a kqueue seems silly.
	 *
	 * This avoids a race condition between the main thread and this helper
	 * thread by ensuring that we drain kqueue events on the same thread
	 * that manipulates the kqueue.
	 */

	for (;;) {
		FD_ZERO(&rfds);
		FD_SET(mainkq, &rfds);
		int r = select(mainkq + 1, &rfds, NULL, NULL, NULL);
		if (r == 1) {
			/* handle_kqueue is a MIG call serviced on the main thread. */
			(void)os_assumes_zero(handle_kqueue(launchd_internal_port, mainkq));
		} else if (posix_assumes_zero(r) != -1) {
			/* select() returned something other than 1 or -1; log it. */
			(void)os_assumes_zero(r);
		}
	}

	return NULL;
}
 545  
/* MIG handler run on the main thread: drain up to BULK_KEV_MAX pending
 * kevents from fd (the main kqueue) without blocking and dispatch each to
 * the kq_callback stored in its udata. The bulk_kev* globals are exposed
 * while dispatching so kevent_mod() can prune not-yet-handled events.
 */
kern_return_t
x_handle_kqueue(mach_port_t junk __attribute__((unused)), integer_t fd)
{
	/* Zero timeout: collect only what is already pending. */
	struct timespec ts = { 0, 0 };
	struct kevent *kevi, kev[BULK_KEV_MAX];
	int i;

	bulk_kev = kev;

	if ((bulk_kev_cnt = kevent(fd, NULL, 0, kev, BULK_KEV_MAX, &ts)) != -1) {
#if 0	
		for (i = 0; i < bulk_kev_cnt; i++) {
			log_kevent_struct(LOG_DEBUG, &kev[0], i);
		}
#endif
		for (i = 0; i < bulk_kev_cnt; i++) {
			bulk_kev_i = i;
			kevi = &kev[i];

			/* A zero filter marks an event pruned by kevent_mod(). */
			if (kevi->filter) {
				launchd_syslog(LOG_DEBUG, "Dispatching kevent (ident/filter): %lu/%hd", kevi->ident, kevi->filter);
				log_kevent_struct(LOG_DEBUG, kev, i);

				struct job_check_s {
					kq_callback kqc;
				};

				/* udata must point at a struct whose first member is the
				 * kq_callback to invoke.
				 */
				struct job_check_s *check = kevi->udata;
				if (check && check->kqc) {
					runtime_ktrace(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_START, kevi->ident, kevi->filter, kevi->fflags);
					(*((kq_callback *)kevi->udata))(kevi->udata, kevi);
					runtime_ktrace0(RTKT_LAUNCHD_BSD_KEVENT|DBG_FUNC_END);
				} else {
					launchd_syslog(LOG_ERR, "The following kevent had invalid context data. Please file a bug with the following information:");
					log_kevent_struct(LOG_EMERG, &kev[0], i);
				}
				launchd_syslog(LOG_DEBUG, "Handled kevent.");
			}
		}
	} else {
		(void)os_assumes_zero(errno);
	}

	/* Batch done; kevent_mod() must no longer prune into it. */
	bulk_kev = NULL;

	return 0;
}
 593  
 594  void
 595  launchd_runtime(void)
 596  {
 597  	launchd_runtime2(max_msg_size);
 598  	dispatch_main();
 599  }
 600  
 601  kern_return_t
 602  launchd_set_bport(mach_port_t name)
 603  {
 604  	return errno = task_set_bootstrap_port(mach_task_self(), name);
 605  }
 606  
 607  kern_return_t
 608  launchd_get_bport(mach_port_t *name)
 609  {
 610  	return errno = task_get_bootstrap_port(mach_task_self(), name);
 611  }
 612  
 613  kern_return_t
 614  launchd_mport_notify_req(mach_port_t name, mach_msg_id_t which)
 615  {
 616  	mach_port_mscount_t msgc = (which == MACH_NOTIFY_PORT_DESTROYED) ? 0 : 1;
 617  	mach_port_t previous, where = (which == MACH_NOTIFY_NO_SENDERS) ? name : launchd_internal_port;
 618  
 619  	if (which == MACH_NOTIFY_NO_SENDERS) {
 620  		/* Always make sure the send count is zero, in case a receive right is
 621  		 * reused
 622  		 */
 623  		errno = mach_port_set_mscount(mach_task_self(), name, 0);
 624  		if (unlikely(errno != KERN_SUCCESS)) {
 625  			return errno;
 626  		}
 627  	}
 628  
 629  	errno = mach_port_request_notification(mach_task_self(), name, which, msgc, where, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
 630  
 631  	if (likely(errno == 0) && previous != MACH_PORT_NULL) {
 632  		(void)os_assumes_zero(launchd_mport_deallocate(previous));
 633  	}
 634  
 635  	return errno;
 636  }
 637  
/* fork() wrapper that gives the child 'bsport' as its bootstrap port and
 * default signal dispositions. The parent restores launchd's ignored
 * signals, signal mask, and a NULL bootstrap port afterwards. Returns the
 * fork() result with errno preserved from fork() itself.
 */
pid_t
runtime_fork(mach_port_t bsport)
{
	sigset_t emptyset, oset;
	pid_t r = -1;
	int saved_errno;
	size_t i;

	sigemptyset(&emptyset);

	/* The child inherits the task bootstrap port, so set it before forking. */
	(void)os_assumes_zero(launchd_mport_make_send(bsport));
	(void)os_assumes_zero(launchd_set_bport(bsport));
	(void)os_assumes_zero(launchd_mport_deallocate(bsport));

	__OS_COMPILETIME_ASSERT__(SIG_ERR == (typeof(SIG_ERR))-1);
	/* Block the ignored set while dispositions are temporarily SIG_DFL so
	 * nothing is delivered in the window around fork().
	 */
	(void)posix_assumes_zero(sigprocmask(SIG_BLOCK, &sigign_set, &oset));
	for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
		(void)posix_assumes_zero(signal(sigigns[i], SIG_DFL));
	}

	r = fork();
	saved_errno = errno;

	if (r != 0) {
		/* Parent (or failed fork): re-ignore signals, restore the mask,
		 * and clear the bootstrap port we staged for the child.
		 */
		for (i = 0; i < (sizeof(sigigns) / sizeof(int)); i++) {
			(void)posix_assumes_zero(signal(sigigns[i], SIG_IGN));
		}
		(void)posix_assumes_zero(sigprocmask(SIG_SETMASK, &oset, NULL));
		(void)os_assumes_zero(launchd_set_bport(MACH_PORT_NULL));
	} else {
		/* Child: negative pid opts the whole process group out of the
		 * noremotehang behavior; then unblock everything.
		 */
		pid_t p = -getpid();
		(void)posix_assumes_zero(sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &p, sizeof(p)));
		(void)posix_assumes_zero(sigprocmask(SIG_SETMASK, &emptyset, NULL));
	}

	/* Report fork()'s errno, not whatever the cleanup calls set. */
	errno = saved_errno;

	return r;
}
 677  
 678  
 679  void
 680  runtime_set_timeout(timeout_callback to_cb, unsigned int sec)
 681  {
 682  	if (sec == 0 || to_cb == NULL) {
 683  		runtime_idle_callback = NULL;
 684  		runtime_idle_timeout = 0;
 685  	}
 686  
 687  	runtime_idle_callback = to_cb;
 688  	runtime_idle_timeout = sec * 1000;
 689  }
 690  
 691  kern_return_t
 692  runtime_add_mport(mach_port_t name, mig_callback demux)
 693  {
 694  	size_t needed_table_sz = (MACH_PORT_INDEX(name) + 1) * sizeof(mig_callback);
 695  	mach_port_t target_set = demux ? ipc_port_set : demand_port_set;
 696  
 697  	if (unlikely(needed_table_sz > mig_cb_table_sz)) {
 698  		needed_table_sz *= 2; /* Let's try and avoid realloc'ing for a while */
 699  		mig_callback *new_table = malloc(needed_table_sz);
 700  
 701  		if (!new_table) {
 702  			return KERN_RESOURCE_SHORTAGE;
 703  		}
 704  
 705  		if (likely(mig_cb_table)) {
 706  			memcpy(new_table, mig_cb_table, mig_cb_table_sz);
 707  			free(mig_cb_table);
 708  		}
 709  
 710  		mig_cb_table_sz = needed_table_sz;
 711  		mig_cb_table = new_table;
 712  	}
 713  
 714  	mig_cb_table[MACH_PORT_INDEX(name)] = demux;
 715  
 716  	return errno = mach_port_move_member(mach_task_self(), name, target_set);
 717  }
 718  
 719  kern_return_t
 720  runtime_remove_mport(mach_port_t name)
 721  {
 722  	mig_cb_table[MACH_PORT_INDEX(name)] = NULL;
 723  
 724  	return errno = mach_port_move_member(mach_task_self(), name, MACH_PORT_NULL);
 725  }
 726  
 727  kern_return_t
 728  launchd_mport_make_send(mach_port_t name)
 729  {
 730  	return errno = mach_port_insert_right(mach_task_self(), name, name, MACH_MSG_TYPE_MAKE_SEND);
 731  }
 732  
 733  kern_return_t
 734  launchd_mport_copy_send(mach_port_t name)
 735  {
 736  	return errno = mach_port_insert_right(mach_task_self(), name, name, MACH_MSG_TYPE_COPY_SEND);
 737  }
 738  
 739  kern_return_t
 740  launchd_mport_make_send_once(mach_port_t name, mach_port_t *so)
 741  {
 742  	mach_msg_type_name_t right = 0;
 743  	return errno = mach_port_extract_right(mach_task_self(), name, MACH_MSG_TYPE_MAKE_SEND_ONCE, so, &right);
 744  }
 745  
 746  kern_return_t
 747  launchd_mport_close_recv(mach_port_t name)
 748  {
 749  	return errno = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE, -1);
 750  }
 751  
 752  kern_return_t
 753  launchd_mport_create_recv(mach_port_t *name)
 754  {
 755  	return errno = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, name);
 756  }
 757  
 758  kern_return_t
 759  launchd_mport_deallocate(mach_port_t name)
 760  {
 761  	return errno = mach_port_deallocate(mach_task_self(), name);
 762  }
 763  
 764  int
 765  kevent_bulk_mod(struct kevent *kev, size_t kev_cnt)
 766  {
 767  	size_t i;
 768  
 769  	for (i = 0; i < kev_cnt; i++) {
 770  		kev[i].flags |= EV_CLEAR|EV_RECEIPT;
 771  	}
 772  
 773  	return kevent(mainkq, kev, kev_cnt, kev, kev_cnt, NULL);
 774  }
 775  
/* Single-change wrapper around kevent() on the main kqueue. Adds EV_CLEAR
 * for non-read/write filters and always requests EV_RECEIPT so the result
 * comes back as an EV_ERROR record. Returns 1 on success, -1 with errno
 * set on failure.
 */
int
kevent_mod(uintptr_t ident, short filter, u_short flags, u_int fflags, intptr_t data, void *udata)
{
	struct kevent kev;
	int r;

	switch (filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	case EVFILT_TIMER:
		/* Workaround 5225889 */
		if (flags & EV_ADD) {
			(void)kevent_mod(ident, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
		}
		/* fall through */
	default:
		flags |= EV_CLEAR;
		break;
	}

	flags |= EV_RECEIPT;

	if (flags & EV_ADD && !udata) {
		/* udata carries the dispatch callback; an EV_ADD without one would
		 * crash the dispatcher later, so reject it now.
		 */
		errno = EINVAL;
		return -1;
	} else if ((flags & EV_DELETE) && bulk_kev) {
		/* We are inside x_handle_kqueue()'s dispatch loop: neutralize any
		 * not-yet-dispatched events for this ident/filter by zeroing their
		 * filter field (the loop skips filter == 0).
		 */
		int i = 0;
		for (i = bulk_kev_i + 1; i < bulk_kev_cnt; i++) {
			if (bulk_kev[i].filter == filter && bulk_kev[i].ident == ident) {
				launchd_syslog(LOG_DEBUG, "Pruning the following kevent:");
				log_kevent_struct(LOG_DEBUG, &bulk_kev[0], i);
				bulk_kev[i].filter = (short)0;
			}
		}
	}

	EV_SET(&kev, ident, filter, flags, fflags, data, udata);

	r = kevent(mainkq, &kev, 1, &kev, 1, NULL);

	if (r != 1) {
		return -1;
	}

	/* With EV_RECEIPT the change is echoed back with EV_ERROR set and the
	 * error code (0 on success) in data.
	 */
	if (kev.flags & EV_ERROR) {
		if ((flags & EV_ADD) && kev.data) {
			launchd_syslog(LOG_DEBUG, "%s(): See the next line...", __func__);
			log_kevent_struct(LOG_DEBUG, &kev, 0);
			errno = kev.data;
			return -1;
		}
	} else {
		(void)os_assert_zero(kev.flags);
	}

	return r;
}
 834  
 835  boolean_t
 836  launchd_internal_demux(mach_msg_header_t *Request, mach_msg_header_t *Reply)
 837  {
 838  	if (internal_server_routine(Request)) {
 839  		return internal_server(Request, Reply);
 840  	} else if (notify_server_routine(Request)) {
 841  		return notify_server(Request, Reply);
 842  	} else {
 843  		return mach_exc_server(Request, Reply);
 844  	}
 845  }
 846  
 847  kern_return_t
 848  do_mach_notify_port_destroyed(mach_port_t notify __attribute__((unused)), mach_port_t rights)
 849  {
 850  	/* This message is sent to us when a receive right is returned to us. */
 851  	if (!job_ack_port_destruction(rights)) {
 852  		(void)os_assumes_zero(launchd_mport_close_recv(rights));
 853  	}
 854  
 855  	return KERN_SUCCESS;
 856  }
 857  
/* Notification handler for MACH_NOTIFY_PORT_DELETED; intentionally a no-op. */
kern_return_t
do_mach_notify_port_deleted(mach_port_t notify __attribute__((unused)), mach_port_name_t name __attribute__((unused)))
{
	/* If we deallocate/destroy/mod_ref away a port with a pending
	 * notification, the original notification message is replaced with
	 * this message. To quote a Mach kernel expert, "the kernel has a
	 * send-once right that has to be used somehow."
	 */
	return KERN_SUCCESS;
}
 868  
 869  kern_return_t
 870  do_mach_notify_no_senders(mach_port_t notify, mach_port_mscount_t mscount __attribute__((unused)))
 871  {
 872  	job_t j = job_mig_intran(notify);
 873  
 874  	/* This message is sent to us when the last customer of one of our objects
 875  	 * goes away.
 876  	 */
 877  
 878  	if (!j) {
 879  		return KERN_FAILURE;
 880  	}
 881  
 882  	job_ack_no_senders(j);
 883  
 884  	return KERN_SUCCESS;
 885  }
 886  
/* Notification handler for MACH_NOTIFY_SEND_ONCE; intentionally a no-op. */
kern_return_t
do_mach_notify_send_once(mach_port_t notify __attribute__((unused)))
{
	/*
	 * This message is sent for each send-once right that is deallocated without
	 * being used.
	 */

	return KERN_SUCCESS;
}
 897  
/* Notification handler: a send right we hold has lost its receiver. Clean
 * up any bookkeeping tied to the now-dead name and balance the extra
 * reference the notification itself adds.
 */
kern_return_t
do_mach_notify_dead_name(mach_port_t notify __attribute__((unused)), mach_port_name_t name)
{
	/* This message is sent to us when one of our send rights no longer has a
	 * receiver somewhere else on the system.
	 */
	if (name == launchd_drain_reply_port) {
		(void)os_assumes_zero(launchd_mport_deallocate(name));
		launchd_drain_reply_port = MACH_PORT_NULL;
	}

	if (root_jobmgr) {
		root_jobmgr = jobmgr_delete_anything_with_port(root_jobmgr, name);
	}

	/* A dead-name notification about a port appears to increment the rights on
	 * said port. Let's deallocate it so that we don't leak dead-name ports.
	 */
	(void)os_assumes_zero(launchd_mport_deallocate(name));

	return KERN_SUCCESS;
}
 920  
/* Receive at most one exception message from 'port' (waiting up to 'to'
 * milliseconds), run it through the mach_exc server, and send the reply
 * with its own timeout. Returns the receive-side mach_msg() result.
 */
mach_msg_return_t
launchd_exc_runtime_once(mach_port_t port, mach_msg_size_t rcv_msg_size, mach_msg_size_t send_msg_size, mig_reply_error_t *bufRequest, mig_reply_error_t *bufReply, mach_msg_timeout_t to)
{
	mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
	/* Request the audit trailer so the handler can identify the sender. */
	mach_msg_option_t rcv_options =	MACH_RCV_MSG
		| MACH_RCV_TIMEOUT
		| MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT)
		| MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0);

	/* do/while(0): single pass with 'break' as the early-exit mechanism. */
	do {
		mr = mach_msg(&bufRequest->Head, rcv_options, 0, rcv_msg_size, port, to, MACH_PORT_NULL);
		switch (mr) {
		case MACH_RCV_TIMED_OUT:
			launchd_syslog(LOG_DEBUG, "Message queue is empty.");
			break;
		case MACH_RCV_TOO_LARGE:
			launchd_syslog(LOG_INFO, "Message is larger than %u bytes.", rcv_msg_size);
			break;
		default:
			(void)os_assumes_zero(mr);
		}

		if (mr == MACH_MSG_SUCCESS) {
			if (!mach_exc_server(&bufRequest->Head, &bufReply->Head)) {
				launchd_syslog(LOG_WARNING, "Exception server routine failed.");
				break;
			}

			mach_msg_return_t smr = ~MACH_MSG_SUCCESS;
			mach_msg_option_t send_options = MACH_SEND_MSG | MACH_SEND_TIMEOUT;

			(void)os_assumes(bufReply->Head.msgh_size <= send_msg_size);
			/* Give the send a slightly longer timeout than the receive. */
			smr = mach_msg(&bufReply->Head, send_options, bufReply->Head.msgh_size, 0, MACH_PORT_NULL, to + 100, MACH_PORT_NULL);
			switch (smr) {
			case MACH_SEND_TIMED_OUT:
				launchd_syslog(LOG_WARNING, "Timed out while trying to send reply to exception message.");
				break;
			case MACH_SEND_INVALID_DEST:
				launchd_syslog(LOG_WARNING, "Tried sending a message to a port that we don't possess a send right to.");
				break;
			default:
				if (smr) {
					launchd_syslog(LOG_WARNING, "Couldn't deliver exception reply: 0x%x", smr);
				}
				break;
			}
		}
	} while (0);

	return mr;
}
 972  
 973  void
 974  runtime_record_caller_creds(audit_token_t *token)
 975  {
 976  	(void)memcpy(&ldc_token, token, sizeof(*token));
 977  	audit_token_to_au32(*token, NULL, &ldc.euid,&ldc.egid, &ldc.uid, &ldc.gid,
 978  		&ldc.pid, &ldc.asid, NULL);
 979  }
 980  
 981  struct ldcred *
 982  runtime_get_caller_creds(void)
 983  {
 984  	return &ldc;
 985  }
 986  
 987  audit_token_t *
 988  runtime_get_caller_token(void)
 989  {
 990  	return &ldc_token;
 991  }
 992  
/* Top-level MIG dispatcher used by the runtime loop: look up the demux
 * routine registered for the destination port, record the caller's audit
 * credentials from the message trailer, and fall back to the notify and
 * xpc_domain subsystems when the primary demux declines the message.
 */
static boolean_t
launchd_mig_demux(mach_msg_header_t *request, mach_msg_header_t *reply)
{
	boolean_t result = false;

	time_of_mach_msg_return = runtime_get_opaque_time();
	launchd_syslog(LOG_DEBUG, "MIG callout: %u", request->msgh_id);
	mig_callback the_demux = mig_cb_table[MACH_PORT_INDEX(request->msgh_local_port)];
	/* The audit trailer sits immediately after the (padded) message body. */
	mach_msg_audit_trailer_t *tp = (mach_msg_audit_trailer_t *)((vm_offset_t)request + round_msg(request->msgh_size));
	runtime_record_caller_creds(&tp->msgh_audit);

	result = the_demux(request, reply);
	if (!result) {
		launchd_syslog(LOG_DEBUG, "Demux failed. Trying other subsystems...");
		if (request->msgh_id == MACH_NOTIFY_NO_SENDERS) {
			launchd_syslog(LOG_DEBUG, "MACH_NOTIFY_NO_SENDERS");
			result = notify_server(request, reply);
		} else if (the_demux == job_server) {
			launchd_syslog(LOG_DEBUG, "Trying domain subsystem...");
			result = xpc_domain_server(request, reply);
		} else {
			launchd_syslog(LOG_ERR, "Cannot handle MIG request with ID: 0x%x", request->msgh_id);
		}
	} else {
		launchd_syslog(LOG_DEBUG, "MIG demux succeeded.");
	}

	return result;
}
1022  
/*
 * Main runtime message loop: receive and dispatch messages from the IPC
 * port set forever. Never returns.
 *
 * xpc_pipe_try_receive() either hands back a decoded XPC request
 * (result == 0 with request != NULL) or consumes the message internally
 * (result == 0 with request == NULL — presumably a raw MIG message
 * handled via the launchd_mig_demux callback we pass in). XPC requests
 * are offered to the event and process subsystems in turn; unclaimed
 * requests are dropped.
 */
void
launchd_runtime2(mach_msg_size_t msg_size)
{
	for (;;) {
		launchd_log_push();

		mach_port_t recvp = MACH_PORT_NULL;
		xpc_object_t request = NULL;
		int result = xpc_pipe_try_receive(ipc_port_set, &request, &recvp, launchd_mig_demux, msg_size, 0);
		if (result == 0 && request) {
			boolean_t handled = false;
			// Stamp event time so runtime_get_opaque_time_of_event() reflects
			// this request's arrival.
			time_of_mach_msg_return = runtime_get_opaque_time();
			launchd_syslog(LOG_DEBUG, "XPC request.");

			xpc_object_t reply = NULL;
			// Offer the request to each XPC subsystem until one claims it.
			if (xpc_event_demux(recvp, request, &reply)) {
				handled = true;
			} else if (xpc_process_demux(recvp, request, &reply)) {
				handled = true;
			}

			if (!handled) {
				// Nobody claimed it: release the request and move on.
				launchd_syslog(LOG_DEBUG, "XPC routine could not be handled.");
				xpc_release(request);
				continue;
			}

			launchd_syslog(LOG_DEBUG, "XPC routine was handled.");
			if (reply) {
				launchd_syslog(LOG_DEBUG, "Sending reply.");
				result = xpc_pipe_routine_reply(reply);
				if (result == 0) {
					launchd_syslog(LOG_DEBUG, "Reply sent successfully.");
				} else if (result != EPIPE) {
					// EPIPE means the client went away; only log other errors.
					launchd_syslog(LOG_ERR, "Failed to send reply message: 0x%x", result);
				}

				xpc_release(reply);
			}

			xpc_release(request);
		} else if (result == 0) {
			launchd_syslog(LOG_DEBUG, "MIG request.");
		} else if (result == EINVAL) {
			launchd_syslog(LOG_ERR, "Rejected invalid request message.");
		}
	}
}
1071  
/*
 * Close a file descriptor, first scrubbing any not-yet-processed entries
 * in the current bulk kevent batch that refer to it, so a stale event is
 * not delivered for a descriptor number that may be recycled.
 */
int
runtime_close(int fd)
{
	int idx;

	if (bulk_kev) {
		/* Only entries after the one currently being processed matter. */
		for (idx = bulk_kev_i + 1; idx < bulk_kev_cnt; idx++) {
			switch (bulk_kev[idx].filter) {
			case EVFILT_VNODE:
			case EVFILT_WRITE:
			case EVFILT_READ:
				/* These filters key on a file descriptor ident. */
				if (unlikely((int)bulk_kev[idx].ident == fd)) {
					launchd_syslog(LOG_DEBUG, "Skipping kevent index: %d", idx);
					/* A zeroed filter marks the entry as dead. */
					bulk_kev[idx].filter = 0;
				}
				/* fallthrough */
			default:
				break;
			}
		}
	}

	return close(fd);
}
1093  
/*
 * Flush a file descriptor's data to disk.
 *
 * Note: an F_FULLFSYNC path for AppleInternal builds used to live here but
 * had been compiled out with #if 0; plain fsync(2) is the effective and
 * intended behavior, so the dead code has been removed.
 *
 * Returns fsync(2)'s result: 0 on success, -1 (with errno set) on failure.
 */
int
runtime_fsync(int fd)
{
	return fsync(fd);
}
1107  
1108  /*
1109   * We should break this into two reference counts.
1110   *
1111   * One for hard references that would prevent exiting.
1112   * One for soft references that would only prevent idle exiting.
1113   *
1114   * In the long run, reference counting should completely automate when a
1115   * process can and should exit.
1116   */
1117  void
1118  runtime_add_ref(void)
1119  {
1120  	if (!pid1_magic) {
1121  #if !TARGET_OS_EMBEDDED
1122  		vproc_transaction_begin(NULL);
1123  #endif
1124  	}
1125  
1126  	runtime_busy_cnt++;
1127  	launchd_syslog(LOG_PERF, "Incremented busy count. Now: %lu", runtime_busy_cnt);
1128  	runtime_remove_timer();
1129  }
1130  
1131  void
1132  runtime_del_ref(void)
1133  {
1134  	if (!pid1_magic) {
1135  #if !TARGET_OS_EMBEDDED
1136  		if (_vproc_transaction_count() == 0) {
1137  			launchd_syslog(LOG_PERF, "Exiting cleanly.");
1138  		}
1139  
1140  		vproc_transaction_end(NULL, NULL);
1141  #endif
1142  	}
1143  
1144  	runtime_busy_cnt--;
1145  	launchd_syslog(LOG_PERF, "Decremented busy count. Now: %lu", runtime_busy_cnt);
1146  	runtime_install_timer();
1147  }
1148  
1149  void
1150  runtime_add_weak_ref(void)
1151  {
1152  	if (!pid1_magic) {
1153  #if !TARGET_OS_EMBEDDED
1154  		_vproc_standby_begin();
1155  #endif
1156  	}
1157  	runtime_standby_cnt++;
1158  }
1159  
1160  void
1161  runtime_del_weak_ref(void)
1162  {
1163  	if (!pid1_magic) {
1164  #if !TARGET_OS_EMBEDDED
1165  		_vproc_standby_end();
1166  #endif
1167  	}
1168  	runtime_standby_cnt--;
1169  }
1170  
1171  void
1172  runtime_install_timer(void)
1173  {
1174  	if (!pid1_magic && runtime_busy_cnt == 0) {
1175  		launchd_syslog(LOG_PERF, "Gone idle. Installing idle-exit timer.");
1176  		(void)posix_assumes_zero(kevent_mod((uintptr_t)&launchd_runtime_busy_time, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 10, root_jobmgr));
1177  	}
1178  }
1179  
1180  void
1181  runtime_remove_timer(void)
1182  {
1183  	if (!pid1_magic && runtime_busy_cnt > 0) {
1184  		if (runtime_busy_cnt == 1) {
1185  			launchd_syslog(LOG_PERF, "No longer idle. Removing idle-exit timer.");	
1186  		}
1187  		(void)posix_assumes_zero(kevent_mod((uintptr_t)&launchd_runtime_busy_time, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
1188  	}
1189  }
1190  
1191  kern_return_t
1192  catch_mach_exception_raise(mach_port_t exception_port __attribute__((unused)), mach_port_t thread, mach_port_t task,
1193  		exception_type_t exception, mach_exception_data_t code, mach_msg_type_number_t codeCnt)
1194  {
1195  	pid_t p4t = -1;
1196  
1197  	(void)os_assumes_zero(pid_for_task(task, &p4t));
1198  
1199  	launchd_syslog(LOG_NOTICE, "%s(): PID: %u thread: 0x%x type: 0x%x code: %p codeCnt: 0x%x",
1200  			__func__, p4t, thread, exception, code, codeCnt);
1201  
1202  	(void)os_assumes_zero(launchd_mport_deallocate(thread));
1203  	(void)os_assumes_zero(launchd_mport_deallocate(task));
1204  
1205  	return KERN_SUCCESS;
1206  }
1207  
1208  kern_return_t
1209  catch_mach_exception_raise_state(mach_port_t exception_port __attribute__((unused)),
1210  		exception_type_t exception, const mach_exception_data_t code, mach_msg_type_number_t codeCnt,
1211  		int *flavor, const thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
1212  		thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
1213  {
1214  	launchd_syslog(LOG_NOTICE, "%s(): type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
1215  			__func__, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);
1216  
1217  	memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
1218  	*new_stateCnt = old_stateCnt;
1219  
1220  	return KERN_SUCCESS;
1221  }
1222  
1223  kern_return_t
1224  catch_mach_exception_raise_state_identity(mach_port_t exception_port __attribute__((unused)), mach_port_t thread, mach_port_t task,
1225  		exception_type_t exception, mach_exception_data_t code, mach_msg_type_number_t codeCnt,
1226  		int *flavor, thread_state_t old_state, mach_msg_type_number_t old_stateCnt,
1227  		thread_state_t new_state, mach_msg_type_number_t *new_stateCnt)
1228  {
1229  	pid_t p4t = -1;
1230  
1231  	(void)os_assumes_zero(pid_for_task(task, &p4t));
1232  
1233  	launchd_syslog(LOG_NOTICE, "%s(): PID: %u thread: 0x%x type: 0x%x code: %p codeCnt: 0x%x flavor: %p old_state: %p old_stateCnt: 0x%x new_state: %p new_stateCnt: %p",
1234  			__func__, p4t, thread, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt);
1235  
1236  	memcpy(new_state, old_state, old_stateCnt * sizeof(old_state[0]));
1237  	*new_stateCnt = old_stateCnt;
1238  
1239  	(void)os_assumes_zero(launchd_mport_deallocate(thread));
1240  	(void)os_assumes_zero(launchd_mport_deallocate(task));
1241  
1242  	return KERN_SUCCESS;
1243  }
1244  
1245  // FIXME: should this be thread safe? With dispatch_once?
1246  uint64_t
1247  runtime_get_uniqueid(void)
1248  {
1249  	static bool once;
1250  	static uint64_t uniqueid;
1251  	if (unlikely(!once)) {
1252  		once = true;
1253  
1254  		struct proc_uniqidentifierinfo info;
1255  		int size;
1256  		size = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &info, sizeof(info));
1257  		if (size == PROC_PIDUNIQIDENTIFIERINFO_SIZE) {
1258  			uniqueid = info.p_uniqueid;
1259  		}
1260  	}
1261  	return uniqueid;
1262  }
1263  
/*
 * Log host virtual-memory statistics at LOG_DEBUG. The first successful
 * call logs and caches absolute values; every later call logs the deltas
 * relative to that first snapshot.
 */
void
launchd_log_vm_stats(void)
{
	static struct vm_statistics orig_stats;
	static bool did_first_pass;
	unsigned int count = HOST_VM_INFO_COUNT;
	struct vm_statistics stats, *statsp;
	mach_port_t mhs = mach_host_self;

	// First pass fills the baseline; later passes fill a scratch copy.
	statsp = did_first_pass ? &stats : &orig_stats;

	if (os_assumes_zero(host_statistics(mhs, HOST_VM_INFO, (host_info_t)statsp, &count)) != KERN_SUCCESS) {
		return;
	}

	if (count != HOST_VM_INFO_COUNT) {
		// Kernel returned an unexpected struct size; record it via assumes.
		(void)os_assumes_zero(count);
	}

	if (did_first_pass) {
		launchd_syslog(LOG_DEBUG, "VM statistics (now - orig): Free: %d Active: %d Inactive: %d Reactivations: %d PageIns: %d PageOuts: %d Faults: %d COW-Faults: %d Purgeable: %d Purges: %d",
				stats.free_count - orig_stats.free_count,
				stats.active_count - orig_stats.active_count,
				stats.inactive_count - orig_stats.inactive_count,
				stats.reactivations - orig_stats.reactivations,
				stats.pageins - orig_stats.pageins,
				stats.pageouts - orig_stats.pageouts,
				stats.faults - orig_stats.faults,
				stats.cow_faults - orig_stats.cow_faults,
				stats.purgeable_count - orig_stats.purgeable_count,
				stats.purges - orig_stats.purges);
	} else {
		launchd_syslog(LOG_DEBUG, "VM statistics (now): Free: %d Active: %d Inactive: %d Reactivations: %d PageIns: %d PageOuts: %d Faults: %d COW-Faults: %d Purgeable: %d Purges: %d",
				orig_stats.free_count,
				orig_stats.active_count,
				orig_stats.inactive_count,
				orig_stats.reactivations,
				orig_stats.pageins,
				orig_stats.pageouts,
				orig_stats.faults,
				orig_stats.cow_faults,
				orig_stats.purgeable_count,
				orig_stats.purges);

		did_first_pass = true;
	}

	// Balance the reference obtained from mach_host_self().
	launchd_mport_deallocate(mhs);
}
1313  
1314  int64_t
1315  runtime_get_wall_time(void)
1316  {
1317  	struct timeval tv;
1318  	int64_t r;
1319  
1320  	(void)posix_assumes_zero(gettimeofday(&tv, NULL));
1321  
1322  	r = tv.tv_sec;
1323  	r *= USEC_PER_SEC;
1324  	r += tv.tv_usec;
1325  
1326  	return r;
1327  }
1328  
/*
 * Current time in opaque Mach absolute-time units. Convert to nanoseconds
 * with runtime_opaque_time_to_nano().
 */
uint64_t
runtime_get_opaque_time(void)
{
	return mach_absolute_time();
}
1334  
/*
 * Opaque timestamp recorded when the most recent message was received
 * (set in launchd_mig_demux() and launchd_runtime2()).
 */
uint64_t
runtime_get_opaque_time_of_event(void)
{
	return time_of_mach_msg_return;
}
1340  
/*
 * Nanoseconds elapsed between opaque timestamp 'o' and the time of the
 * most recently received message — note: measured against the event
 * timestamp, NOT against the current time.
 */
uint64_t
runtime_get_nanoseconds_since(uint64_t o)
{
	return runtime_opaque_time_to_nano(runtime_get_opaque_time_of_event() - o);
}
1346  
/*
 * Convert an opaque mach_absolute_time() interval to nanoseconds using the
 * timebase cached in do_file_init() (tbi, tbi_safe_math_max,
 * tbi_float_val). When numer == denom the units are already nanoseconds
 * and no scaling is done.
 */
uint64_t
runtime_opaque_time_to_nano(uint64_t o)
{
	// The branch-prediction hint differs per architecture: these #if arms
	// each open the same `if`, varying only likely/unlikely.
#if defined(__i386__) || defined(__x86_64__)
	if (unlikely(tbi.numer != tbi.denom)) {
#elif defined(__ppc__) || defined(__ppc64__)
	if (likely(tbi.numer != tbi.denom)) {
#else
	if (tbi.numer != tbi.denom) {
#endif
#ifdef __LP64__
		// 128-bit intermediate: o * numer cannot overflow for any 64-bit o.
		__uint128_t tmp = o;
		tmp *= tbi.numer;
		tmp /= tbi.denom;
		o = tmp;
#else
		// 32-bit: exact integer math while o * numer fits in 64 bits,
		// otherwise fall back to (slightly lossy) floating point.
		if (o <= tbi_safe_math_max) {
			o *= tbi.numer;
			o /= tbi.denom;
		} else {
			double d = o;
			d *= tbi_float_val;
			o = d;
		}
#endif
	}

	return o;
}
1376  
/*
 * One-time initialization for this file's state: caches the Mach timebase
 * for time conversions, records the start time, and derives the various
 * launchd_* behavior flags from the PID, marker files, boot-args, and
 * sysctls.
 */
void
do_file_init(void)
{
	struct stat sb;

	// Cache the timebase plus the derived values used by the 32-bit
	// overflow-avoiding path in runtime_opaque_time_to_nano().
	os_assert_zero(mach_timebase_info(&tbi));
	tbi_float_val = tbi.numer;
	tbi_float_val /= tbi.denom;
	tbi_safe_math_max = UINT64_MAX / tbi.numer;

	launchd_system_start = runtime_get_wall_time();

	// PID 1 is the system instance; several flags below key off this.
	if (getpid() == 1) {
		pid1_magic = true;
	}

	// AppleInternal behavior can be disabled via a second marker file.
	if (stat("/AppleInternal", &sb) == 0 && stat("/var/db/disableAppleInternal", &sb) == -1) {
		launchd_apple_internal = true;
	}

	// Debug/logging knobs gated by config_check() (defined elsewhere in
	// this file; presumably a marker-file existence test — confirm there).
	if (config_check(".launchd_use_gmalloc", sb)) {
		launchd_use_gmalloc = true;
	}

	if (config_check(".launchd_log_shutdown", sb)) {
		launchd_log_shutdown = true;
	}

	if (config_check(".launchd_log_debug", sb)) {
		launchd_log_debug = true;
	}

	if (config_check(".launchd_log_perf", sb)) {
		launchd_log_perf = true;
	}

	if (config_check("/etc/rc.cdrom", sb)) {
		launchd_osinstaller = true;
	}

	if (!pid1_magic && config_check(".launchd_allow_global_dyld_envvars", sb)) {
		launchd_allow_global_dyld_envvars = true;
	}

	// Boot-args are only consulted when running as PID 1.
	char buff[1024];
	size_t len = sizeof(buff) - 1;
	int r = pid1_magic ? sysctlbyname("kern.bootargs", buff, &len, NULL, 0) : -1;
	if (r == 0) {
		if (strnstr(buff, "-v", len)) {
			launchd_verbose_boot = true;
		}
		if (strnstr(buff, "launchd_trap_sigkill_bugs", len)) {
			launchd_trap_sigkill_bugs = true;
		}
		if (strnstr(buff, "launchd_no_jetsam_perm_check", len)) {
			launchd_no_jetsam_perm_check = true;
		}
	}

	len = sizeof(buff) - 1;
#if TARGET_OS_EMBEDDED
	// Detect AppleTV hardware from the model string.
	r = sysctlbyname("hw.machine", buff, &len, NULL, 0);
	if (r == 0) {
		if (strnstr(buff, "AppleTV", len)) {
			launchd_appletv = true;
		}
	}
#endif

#if !TARGET_OS_EMBEDDED
	// On the desktop, shutdown debugging also requires a verbose boot.
	if (pid1_magic && launchd_verbose_boot && config_check(".launchd_shutdown_debugging", sb)) {
		launchd_shutdown_debugging = true;
	}
#else
	if (pid1_magic && config_check(".launchd_shutdown_debugging", sb)) {
		launchd_shutdown_debugging = true;
	}
#endif
}