/* xnu / osfmk / ipc / ipc_kmsg.c */
   1  /*
   2   * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
   3   *
   4   * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
   5   *
   6   * This file contains Original Code and/or Modifications of Original Code
   7   * as defined in and that are subject to the Apple Public Source License
   8   * Version 2.0 (the 'License'). You may not use this file except in
   9   * compliance with the License. The rights granted to you under the License
  10   * may not be used to create, or enable the creation or redistribution of,
  11   * unlawful or unlicensed copies of an Apple operating system, or to
  12   * circumvent, violate, or enable the circumvention or violation of, any
  13   * terms of an Apple operating system software license agreement.
  14   *
  15   * Please obtain a copy of the License at
  16   * http://www.opensource.apple.com/apsl/ and read it before using this file.
  17   *
  18   * The Original Code and all software distributed under the License are
  19   * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  20   * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  21   * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
  22   * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  23   * Please see the License for the specific language governing rights and
  24   * limitations under the License.
  25   *
  26   * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  27   */
  28  /*
  29   * @OSF_COPYRIGHT@
  30   */
  31  /*
  32   * Mach Operating System
  33   * Copyright (c) 1991,1990,1989 Carnegie Mellon University
  34   * All Rights Reserved.
  35   *
  36   * Permission to use, copy, modify and distribute this software and its
  37   * documentation is hereby granted, provided that both the copyright
  38   * notice and this permission notice appear in all copies of the
  39   * software, derivative works or modified versions, and any portions
  40   * thereof, and that both notices appear in supporting documentation.
  41   *
  42   * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  43   * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  44   * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  45   *
  46   * Carnegie Mellon requests users of this software to return to
  47   *
  48   *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  49   *  School of Computer Science
  50   *  Carnegie Mellon University
  51   *  Pittsburgh PA 15213-3890
  52   *
  53   * any improvements or extensions that they make and grant Carnegie Mellon
  54   * the rights to redistribute these changes.
  55   */
  56  /*
  57   * NOTICE: This file was modified by McAfee Research in 2004 to introduce
  58   * support for mandatory and extensible security protections.  This notice
  59   * is included in support of clause 2.2 (b) of the Apple Public License,
  60   * Version 2.0.
  61   * Copyright (c) 2005 SPARTA, Inc.
  62   */
  63  /*
  64   */
  65  /*
  66   *	File:	ipc/ipc_kmsg.c
  67   *	Author:	Rich Draves
  68   *	Date:	1989
  69   *
  70   *	Operations on kernel messages.
  71   */
  72  
  73  
  74  #include <mach/mach_types.h>
  75  #include <mach/boolean.h>
  76  #include <mach/kern_return.h>
  77  #include <mach/message.h>
  78  #include <mach/port.h>
  79  #include <mach/vm_map.h>
  80  #include <mach/mach_vm.h>
  81  #include <mach/vm_statistics.h>
  82  
  83  #include <kern/kern_types.h>
  84  #include <kern/assert.h>
  85  #include <kern/debug.h>
  86  #include <kern/ipc_kobject.h>
  87  #include <kern/kalloc.h>
  88  #include <kern/zalloc.h>
  89  #include <kern/processor.h>
  90  #include <kern/thread.h>
  91  #include <kern/sched_prim.h>
  92  #include <kern/misc_protos.h>
  93  #include <kern/cpu_data.h>
  94  #include <kern/policy_internal.h>
  95  #include <kern/mach_filter.h>
  96  
  97  #include <pthread/priority_private.h>
  98  
  99  #include <machine/limits.h>
 100  
 101  #include <vm/vm_map.h>
 102  #include <vm/vm_object.h>
 103  #include <vm/vm_kern.h>
 104  
 105  #include <ipc/port.h>
 106  #include <ipc/ipc_types.h>
 107  #include <ipc/ipc_entry.h>
 108  #include <ipc/ipc_kmsg.h>
 109  #include <ipc/ipc_notify.h>
 110  #include <ipc/ipc_object.h>
 111  #include <ipc/ipc_space.h>
 112  #include <ipc/ipc_port.h>
 113  #include <ipc/ipc_right.h>
 114  #include <ipc/ipc_hash.h>
 115  #include <ipc/ipc_table.h>
 116  #include <ipc/ipc_importance.h>
 117  #if MACH_FLIPC
 118  #include <kern/mach_node.h>
 119  #include <ipc/flipc.h>
 120  #endif
 121  
 122  #include <os/overflow.h>
 123  
 124  #include <security/mac_mach_internal.h>
 125  
 126  #include <device/device_server.h>
 127  
 128  #include <string.h>
 129  
 130  #ifdef ppc
 131  #include <ppc/Firmware.h>
 132  #include <ppc/low_trace.h>
 133  #endif
 134  
 135  #if DEBUG
 136  #define DEBUG_MSGS_K64 1
 137  #endif
 138  
 139  #include <sys/kdebug.h>
 140  #include <libkern/OSAtomic.h>
 141  
 142  #include <libkern/crypto/sha2.h>
 143  
 144  #include <ptrauth.h>
 145  #if __has_feature(ptrauth_calls)
 146  #include <libkern/ptrauth_utils.h>
 147  #endif
 148  
#pragma pack(4)

/*
 * 32-bit ("legacy") user-visible layout of the mach message header.
 * On LP64 kernels the native mach_msg_header_t carries wider port
 * fields, so copyin/copyout paths translate between this packed shape
 * and the in-kernel form.  Packed to 4 bytes to match the user ABI.
 */
typedef struct{
	mach_msg_bits_t       msgh_bits;
	mach_msg_size_t       msgh_size;
	mach_port_name_t      msgh_remote_port;
	mach_port_name_t      msgh_local_port;
	mach_port_name_t      msgh_voucher_port;
	mach_msg_id_t         msgh_id;
} mach_msg_legacy_header_t;

/* Legacy header plus the body (descriptor count) for complex messages. */
typedef struct{
	mach_msg_legacy_header_t       header;
	mach_msg_body_t         body;
} mach_msg_legacy_base_t;

/* Legacy (32-bit user) port descriptor; bitfields mirror the user ABI. */
typedef struct{
	mach_port_name_t                              name;
	mach_msg_size_t                               pad1;
	uint32_t                                              pad2 : 16;
	mach_msg_type_name_t                  disposition : 8;
	mach_msg_descriptor_type_t    type : 8;
} mach_msg_legacy_port_descriptor_t;


/* Union of all legacy (32-bit user) descriptor shapes, switched on .type. */
typedef union{
	mach_msg_legacy_port_descriptor_t                     port;
	mach_msg_ool_descriptor32_t           out_of_line32;
	mach_msg_ool_ports_descriptor32_t     ool_ports32;
	mach_msg_guarded_port_descriptor32_t  guarded_port32;
	mach_msg_type_descriptor_t                    type;
} mach_msg_legacy_descriptor_t;

#pragma pack()

/* Bytes by which the native header exceeds the legacy (32-bit user) header. */
#define LEGACY_HEADER_SIZE_DELTA ((mach_msg_size_t)(sizeof(mach_msg_header_t) - sizeof(mach_msg_legacy_header_t)))

// END LP64 fixes
 187  
 188  #if __has_feature(ptrauth_calls)
 189  typedef uintptr_t ikm_sig_scratch_t;
 190  
/*
 * Start a new kmsg signature accumulation (ptrauth flavor).
 * Seeds the rolling scratch value with a fixed discriminator so every
 * kmsg signature chain begins from the same diversified salt.
 */
static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	*scratchp = OS_PTRAUTH_DISCRIMINATOR("kmsg.ikm_signature");
}
 198  
/*
 * Fold one chunk of kmsg data into the rolling signature (ptrauth flavor).
 * The previous scratch value is used as the salt for the next chunk, so
 * chunk order matters.
 */
static void
ikm_chunk_sig(
	ipc_kmsg_t kmsg,
	void *data,
	size_t len,
	ikm_sig_scratch_t *scratchp)
{
	int ptrauth_flags;
	void *trailerp;

	/*
	 * if we happen to be doing the trailer chunk,
	 * diversify with the ptrauth-ed trailer pointer -
	 * as that is unchanging for the kmsg
	 */
	trailerp = (void *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));

	ptrauth_flags = (data == trailerp) ? PTRAUTH_ADDR_DIVERSIFY : 0;
	*scratchp = ptrauth_utils_sign_blob_generic(data, len, *scratchp, ptrauth_flags);
}
 221  
/*
 * Produce the final signature value (ptrauth flavor).
 * The scratch value already is the accumulated signature; just return it.
 */
static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	return *scratchp;
}
 229  
 230  #elif defined(CRYPTO_SHA2) && !defined(__x86_64__) && !defined(__arm__)
 231  
 232  typedef SHA256_CTX ikm_sig_scratch_t;
 233  
/*
 * Start a new kmsg signature accumulation (SHA-256 flavor).
 * Seeds the hash with the per-boot address-hash salt so signatures are
 * not predictable across boots.
 */
static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	SHA256_Init(scratchp);
	SHA256_Update(scratchp, &vm_kernel_addrhash_salt_ext, sizeof(uint64_t));
}
 242  
/* Fold one chunk of kmsg data into the running SHA-256 state. */
static void
ikm_chunk_sig(
	__unused ipc_kmsg_t kmsg,
	void *data,
	size_t len,
	ikm_sig_scratch_t *scratchp)
{
	SHA256_Update(scratchp, data, len);
}
 252  
/*
 * Finish the SHA-256 and truncate the digest to one uintptr_t.
 * NOTE: finalizing consumes the context, so partial evaluations
 * (IKM_PARTIAL_SIG) are not usable with this flavor.
 */
static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	uintptr_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(uintptr_t)];

	SHA256_Final((uint8_t *)sha_digest, scratchp);

	/*
	 * Only use one uintptr_t sized part of result for space and compat reasons.
	 * Truncation is better than XOR'ing the chunks together in hopes of higher
	 * entropy - because of its lower risk of collisions.
	 */
	return *sha_digest;
}
 269  
 270  #else
 271  /* Stubbed out implementation (for __x86_64__, __arm__ for now) */
 272  
 273  typedef uintptr_t ikm_sig_scratch_t;
 274  
/* Stub flavor: signatures are always zero on these architectures. */
static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	*scratchp = 0;
}
 282  
 283  static void
 284  ikm_chunk_sig(
 285  	__unused ipc_kmsg_t kmsg,
 286  	__unused void *data,
 287  	__unused size_t len,
 288  	__unused ikm_sig_scratch_t *scratchp)
 289  {
 290  	return;
 291  }
 292  
/* Stub flavor: return the (always zero) scratch value as the signature. */
static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	return *scratchp;
}
 300  
 301  #endif
 302  
/*
 * Fold the message header (and body descriptor count, when complex) into
 * the signature.  A local snapshot is signed rather than the live header,
 * with legitimately-mutable bits masked out so later legal mutation of the
 * header (e.g. voucher bits) does not invalidate the signature.
 */
static void
ikm_header_sig(
	ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	mach_msg_size_t dsc_count;
	mach_msg_base_t base;
	boolean_t complex;

	/* take a snapshot of the message header/body-count */
	base.header = *kmsg->ikm_header;
	complex = ((base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX) != 0);
	if (complex) {
		dsc_count = ((mach_msg_body_t *)(kmsg->ikm_header + 1))->msgh_descriptor_count;
	} else {
		/* simple messages sign a zero descriptor count */
		dsc_count = 0;
	}
	base.body.msgh_descriptor_count = dsc_count;

	/* compute sig of a copy of the header with all varying bits masked off */
	base.header.msgh_bits &= MACH_MSGH_BITS_USER;
	base.header.msgh_bits &= ~MACH_MSGH_BITS_VOUCHER_MASK;
	ikm_chunk_sig(kmsg, &base, sizeof(mach_msg_base_t), scratchp);
}
 327  
/*
 * Fold the message trailer into the signature.  The trailer sits
 * immediately after the (rounded) message body, at a fixed location
 * for the life of the kmsg.
 */
static void
ikm_trailer_sig(
	ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	mach_msg_max_trailer_t *trailerp;

	/* Add sig of the trailer contents */
	trailerp = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));
	ikm_chunk_sig(kmsg, trailerp, sizeof(*trailerp), scratchp);
}
 341  
 342  /* Compute the signature for the body bits of a message */
 343  static void
 344  ikm_body_sig(
 345  	ipc_kmsg_t        kmsg,
 346  	ikm_sig_scratch_t *scratchp)
 347  {
 348  	mach_msg_descriptor_t *kern_dsc;
 349  	mach_msg_size_t dsc_count;
 350  	mach_msg_body_t *body;
 351  	mach_msg_size_t i;
 352  
 353  	if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) {
 354  		return;
 355  	}
 356  	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
 357  	dsc_count = body->msgh_descriptor_count;
 358  
 359  	if (dsc_count == 0) {
 360  		return;
 361  	}
 362  
 363  	kern_dsc = (mach_msg_descriptor_t *) (body + 1);
 364  
 365  	/* Compute the signature for the whole descriptor array */
 366  	ikm_chunk_sig(kmsg, kern_dsc, sizeof(*kern_dsc) * dsc_count, scratchp);
 367  
 368  	/* look for descriptor contents that need a signature */
 369  	for (i = 0; i < dsc_count; i++) {
 370  		switch (kern_dsc[i].type.type) {
 371  		case MACH_MSG_PORT_DESCRIPTOR:
 372  		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
 373  		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
 374  		case MACH_MSG_OOL_DESCRIPTOR:
 375  			break;
 376  
 377  		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
 378  			mach_msg_ool_ports_descriptor_t *ports_dsc;
 379  
 380  			/* Compute sig for the port/object pointers */
 381  			ports_dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i];
 382  			ikm_chunk_sig(kmsg, ports_dsc->address, ports_dsc->count * sizeof(ipc_object_t), scratchp);
 383  			break;
 384  		}
 385  		default: {
 386  			panic("ipc_kmsg_body_sig: invalid message descriptor");
 387  		}
 388  		}
 389  	}
 390  }
 391  
/*
 * Compute and store the integrity signature(s) for a kmsg:
 * header, then trailer, then body are folded in, and the final value is
 * saved in kmsg->ikm_signature.  With IKM_PARTIAL_SIG, intermediate
 * header-only and header+trailer signatures are stored as well so
 * validation failures can be localized.
 */
static void
ikm_sign(
	ipc_kmsg_t kmsg)
{
	ikm_sig_scratch_t scratch;
	uintptr_t sig;

	/* guard against signing something that is not a kmsg zone element */
	zone_require(ipc_kmsg_zone, kmsg);

	ikm_init_sig(kmsg, &scratch);

	ikm_header_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* save off partial signature for just header */
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_header_sig = sig;
#endif

	ikm_trailer_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* save off partial signature for header+trailer */
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_headtrail_sig = sig;
#endif

	ikm_body_sig(kmsg, &scratch);
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_signature = sig;
}
 421  
/* Diagnostics for kmsg signature validation failures. */
unsigned int ikm_signature_failures;            /* total full-signature mismatches */
unsigned int ikm_signature_failure_id;          /* msgh_id of most recent failure */
#if (DEVELOPMENT || DEBUG)
unsigned int ikm_signature_panic_disable;       /* nonzero: record failure, skip panic */
unsigned int ikm_signature_header_failures;     /* partial (header-only) mismatches */
unsigned int ikm_signature_trailer_failures;    /* partial (header+trailer) mismatches */
#endif
 429  
/*
 * Recompute the kmsg signature and compare against the stored value,
 * panicking on mismatch (message corruption or tampering).  With
 * IKM_PARTIAL_SIG, the header and header+trailer partial signatures are
 * checked first so the panic can report which region mismatched.
 * On DEVELOPMENT/DEBUG kernels the panic can be suppressed via
 * ikm_signature_panic_disable.
 */
static void
ikm_validate_sig(
	ipc_kmsg_t kmsg)
{
	ikm_sig_scratch_t scratch;
	uintptr_t expected;
	uintptr_t sig;
	char *str;

	/* guard against validating something that is not a kmsg zone element */
	zone_require(ipc_kmsg_zone, kmsg);

	ikm_init_sig(kmsg, &scratch);

	ikm_header_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* Do partial evaluation of just the header signature */
	sig = ikm_finalize_sig(kmsg, &scratch);
	expected = kmsg->ikm_header_sig;
	if (sig != expected) {
		ikm_signature_header_failures++;
		str = "header";
		goto failure;
	}
#endif

	ikm_trailer_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* Do partial evaluation of header+trailer signature */
	sig = ikm_finalize_sig(kmsg, &scratch);
	expected = kmsg->ikm_headtrail_sig;
	if (sig != expected) {
		ikm_signature_trailer_failures++;
		str = "trailer";
		goto failure;
	}
#endif

	ikm_body_sig(kmsg, &scratch);
	sig = ikm_finalize_sig(kmsg, &scratch);

	expected = kmsg->ikm_signature;
	if (sig != expected) {
		ikm_signature_failures++;
		str = "full";

#if IKM_PARTIAL_SIG
failure:
#endif
		{
			mach_msg_id_t id = kmsg->ikm_header->msgh_id;

			ikm_signature_failure_id = id;
#if (DEVELOPMENT || DEBUG)
			if (ikm_signature_panic_disable) {
				return;
			}
#endif
			panic("ikm_validate_sig: %s signature mismatch: kmsg=0x%p, id=%d, sig=0x%zx (expected 0x%zx)",
			    str, kmsg, id, sig, expected);
		}
	}
}
 492  
 493  #if DEBUG_MSGS_K64
 494  extern void ipc_pset_print64(
 495  	ipc_pset_t      pset);
 496  
 497  extern void     ipc_kmsg_print64(
 498  	ipc_kmsg_t      kmsg,
 499  	const char      *str);
 500  
 501  extern void     ipc_msg_print64(
 502  	mach_msg_header_t       *msgh);
 503  
 504  extern ipc_port_t ipc_name_to_data64(
 505  	task_t                  task,
 506  	mach_port_name_t        name);
 507  
 508  /*
 509   * Forward declarations
 510   */
 511  void ipc_msg_print_untyped64(
 512  	mach_msg_body_t         *body);
 513  
 514  const char * ipc_type_name64(
 515  	int             type_name,
 516  	boolean_t       received);
 517  
 518  void ipc_print_type_name64(
 519  	int     type_name);
 520  
 521  const char *
 522  msgh_bit_decode64(
 523  	mach_msg_bits_t bit);
 524  
 525  const char *
 526  mm_copy_options_string64(
 527  	mach_msg_copy_options_t option);
 528  
 529  void db_print_msg_uid64(mach_msg_header_t *);
 530  
 531  static void
 532  ipc_msg_body_print64(void *body, int size)
 533  {
 534  	uint32_t        *word = (uint32_t *) body;
 535  	uint32_t        *end  = (uint32_t *)(((uintptr_t) body) + size
 536  	    - sizeof(mach_msg_header_t));
 537  	int             i;
 538  
 539  	kprintf("  body(%p-%p):\n    %p: ", body, end, word);
 540  	for (;;) {
 541  		for (i = 0; i < 8; i++, word++) {
 542  			if (word >= end) {
 543  				kprintf("\n");
 544  				return;
 545  			}
 546  			kprintf("%08x ", *word);
 547  		}
 548  		kprintf("\n    %p: ", word);
 549  	}
 550  }
 551  
 552  
 553  const char *
 554  ipc_type_name64(
 555  	int             type_name,
 556  	boolean_t       received)
 557  {
 558  	switch (type_name) {
 559  	case MACH_MSG_TYPE_PORT_NAME:
 560  		return "port_name";
 561  
 562  	case MACH_MSG_TYPE_MOVE_RECEIVE:
 563  		if (received) {
 564  			return "port_receive";
 565  		} else {
 566  			return "move_receive";
 567  		}
 568  
 569  	case MACH_MSG_TYPE_MOVE_SEND:
 570  		if (received) {
 571  			return "port_send";
 572  		} else {
 573  			return "move_send";
 574  		}
 575  
 576  	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
 577  		if (received) {
 578  			return "port_send_once";
 579  		} else {
 580  			return "move_send_once";
 581  		}
 582  
 583  	case MACH_MSG_TYPE_COPY_SEND:
 584  		return "copy_send";
 585  
 586  	case MACH_MSG_TYPE_MAKE_SEND:
 587  		return "make_send";
 588  
 589  	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
 590  		return "make_send_once";
 591  
 592  	default:
 593  		return (char *) 0;
 594  	}
 595  }
 596  
 597  void
 598  ipc_print_type_name64(
 599  	int     type_name)
 600  {
 601  	const char *name = ipc_type_name64(type_name, TRUE);
 602  	if (name) {
 603  		kprintf("%s", name);
 604  	} else {
 605  		kprintf("type%d", type_name);
 606  	}
 607  }
 608  
 609  /*
 610   * ipc_kmsg_print64	[ debug ]
 611   */
/*
 * Debug dump of a kmsg: queue links and size, followed by the full
 * message contents via ipc_msg_print64.  'str' is a caller-supplied
 * prefix identifying the call site.
 */
void
ipc_kmsg_print64(
	ipc_kmsg_t      kmsg,
	const char      *str)
{
	kprintf("%s kmsg=%p:\n", str, kmsg);
	kprintf("  next=%p, prev=%p, size=%d",
	    kmsg->ikm_next,
	    kmsg->ikm_prev,
	    kmsg->ikm_size);
	kprintf("\n");
	ipc_msg_print64(kmsg->ikm_header);
}
 625  
 626  const char *
 627  msgh_bit_decode64(
 628  	mach_msg_bits_t bit)
 629  {
 630  	switch (bit) {
 631  	case MACH_MSGH_BITS_COMPLEX:        return "complex";
 632  	case MACH_MSGH_BITS_CIRCULAR:       return "circular";
 633  	default:                            return (char *) 0;
 634  	}
 635  }
 636  
 637  /*
 638   * ipc_msg_print64	[ debug ]
 639   */
/*
 * ipc_msg_print64	[ debug ]
 *
 * Dump a mach message header to the console: raw and decoded msgh_bits,
 * remote/local port pointers with their dispositions, id and size,
 * then descriptors (if complex) and a raw hex dump of the body.
 */
void
ipc_msg_print64(
	mach_msg_header_t       *msgh)
{
	mach_msg_bits_t mbits;
	unsigned int    bit, i;
	const char      *bit_name;
	int             needs_comma;

	mbits = msgh->msgh_bits;
	kprintf("  msgh_bits=0x%x: l=0x%x,r=0x%x\n",
	    mbits,
	    MACH_MSGH_BITS_LOCAL(msgh->msgh_bits),
	    MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));

	/* walk the "other" (non-port-disposition) bits one at a time */
	mbits = MACH_MSGH_BITS_OTHER(mbits) & MACH_MSGH_BITS_USED;
	kprintf("  decoded bits:  ");
	needs_comma = 0;
	for (i = 0, bit = 1; i < sizeof(mbits) * 8; ++i, bit <<= 1) {
		if ((mbits & bit) == 0) {
			continue;
		}
		bit_name = msgh_bit_decode64((mach_msg_bits_t)bit);
		if (bit_name) {
			kprintf("%s%s", needs_comma ? "," : "", bit_name);
		} else {
			kprintf("%sunknown(0x%x),", needs_comma ? "," : "", bit);
		}
		++needs_comma;
	}
	/* report any bits set outside the defined mask */
	if (msgh->msgh_bits & ~MACH_MSGH_BITS_USED) {
		kprintf("%sunused=0x%x,", needs_comma ? "," : "",
		    msgh->msgh_bits & ~MACH_MSGH_BITS_USED);
	}
	kprintf("\n");

	needs_comma = 1;
	if (msgh->msgh_remote_port) {
		kprintf("  remote=%p(", msgh->msgh_remote_port);
		ipc_print_type_name64(MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
		kprintf(")");
	} else {
		kprintf("  remote=null");
	}

	if (msgh->msgh_local_port) {
		kprintf("%slocal=%p(", needs_comma ? "," : "",
		    msgh->msgh_local_port);
		ipc_print_type_name64(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits));
		kprintf(")\n");
	} else {
		kprintf("local=null\n");
	}

	kprintf("  msgh_id=%d, size=%d\n",
	    msgh->msgh_id,
	    msgh->msgh_size);

	/* complex messages carry a descriptor array right after the header */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		ipc_msg_print_untyped64((mach_msg_body_t *) (msgh + 1));
	}

	ipc_msg_body_print64((void *)(msgh + 1), msgh->msgh_size);
}
 704  
 705  
 706  const char *
 707  mm_copy_options_string64(
 708  	mach_msg_copy_options_t option)
 709  {
 710  	const char      *name;
 711  
 712  	switch (option) {
 713  	case MACH_MSG_PHYSICAL_COPY:
 714  		name = "PHYSICAL";
 715  		break;
 716  	case MACH_MSG_VIRTUAL_COPY:
 717  		name = "VIRTUAL";
 718  		break;
 719  	case MACH_MSG_OVERWRITE:
 720  		name = "OVERWRITE(DEPRECATED)";
 721  		break;
 722  	case MACH_MSG_ALLOCATE:
 723  		name = "ALLOCATE";
 724  		break;
 725  	case MACH_MSG_KALLOC_COPY_T:
 726  		name = "KALLOC_COPY_T";
 727  		break;
 728  	default:
 729  		name = "unknown";
 730  		break;
 731  	}
 732  	return name;
 733  }
 734  
/*
 * Dump the descriptor array of a complex message: one line per
 * descriptor, switched on the descriptor type.  Unknown types are
 * reported rather than skipped.
 */
void
ipc_msg_print_untyped64(
	mach_msg_body_t         *body)
{
	mach_msg_descriptor_t       *saddr, *send;
	mach_msg_descriptor_type_t  type;

	kprintf("  %d descriptors: \n", body->msgh_descriptor_count);

	/* descriptors immediately follow the body (count) word */
	saddr = (mach_msg_descriptor_t *) (body + 1);
	send = saddr + body->msgh_descriptor_count;

	for (; saddr < send; saddr++) {
		type = saddr->type.type;

		switch (type) {
		case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_port_descriptor_t *dsc;

			dsc = &saddr->port;
			kprintf("    PORT name = %p disp = ", dsc->name);
			ipc_print_type_name64(dsc->disposition);
			kprintf("\n");
			break;
		}
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR: {
			mach_msg_ool_descriptor_t *dsc;

			dsc = (mach_msg_ool_descriptor_t *) &saddr->out_of_line;
			kprintf("    OOL%s addr = %p size = 0x%x copy = %s %s\n",
			    type == MACH_MSG_OOL_DESCRIPTOR ? "" : " VOLATILE",
			    dsc->address, dsc->size,
			    mm_copy_options_string64(dsc->copy),
			    dsc->deallocate ? "DEALLOC" : "");
			break;
		}
		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			mach_msg_ool_ports_descriptor_t *dsc;

			dsc = (mach_msg_ool_ports_descriptor_t *) &saddr->ool_ports;

			kprintf("    OOL_PORTS addr = %p count = 0x%x ",
			    dsc->address, dsc->count);
			kprintf("disp = ");
			ipc_print_type_name64(dsc->disposition);
			kprintf(" copy = %s %s\n",
			    mm_copy_options_string64(dsc->copy),
			    dsc->deallocate ? "DEALLOC" : "");
			break;
		}
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
			mach_msg_guarded_port_descriptor_t *dsc;

			dsc = (mach_msg_guarded_port_descriptor_t *)&saddr->guarded_port;
			kprintf("    GUARDED_PORT name = %p flags = 0x%x disp = ", dsc->name, dsc->flags);
			ipc_print_type_name64(dsc->disposition);
			kprintf("\n");
			break;
		}
		default: {
			kprintf("    UNKNOWN DESCRIPTOR 0x%x\n", type);
			break;
		}
		}
	}
}
 802  
/*
 * Conditionally dump a kmsg via kprintf when IPC syscall debugging is
 * enabled (DEBUG_MSGS_K64 builds only).
 */
#define DEBUG_IPC_KMSG_PRINT(kmsg, string)       \
	__unreachable_ok_push   \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {    \
	        ipc_kmsg_print64(kmsg, string); \
	}       \
	__unreachable_ok_pop

/* Same gate for a raw body hex dump. */
#define DEBUG_IPC_MSG_BODY_PRINT(body, size)     \
	__unreachable_ok_push   \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {    \
	        ipc_msg_body_print64(body,size);\
	}       \
	__unreachable_ok_pop
#else /* !DEBUG_MSGS_K64 */
/* Non-debug builds: the print hooks compile to nothing. */
#define DEBUG_IPC_KMSG_PRINT(kmsg, string)
#define DEBUG_IPC_MSG_BODY_PRINT(body, size)
#endif  /* !DEBUG_MSGS_K64 */
 820  
/* Limits and maps defined in ipc_init; see osfmk/ipc initialization. */
extern vm_map_t         ipc_kernel_copy_map;
extern vm_size_t        ipc_kmsg_max_space;
extern const vm_size_t  ipc_kmsg_max_vm_space;
extern const vm_size_t  ipc_kmsg_max_body_space;
extern vm_size_t        msg_ool_size_small;

#define MSG_OOL_SIZE_SMALL      msg_ool_size_small

/*
 * "OTHER" is the descriptor layout of the non-native pointer width:
 * 32-bit descriptors on LP64 kernels, 64-bit descriptors otherwise.
 * MAP_SIZE_DIFFERS is true when the target map's pointer width differs
 * from the kernel's.
 */
#if defined(__LP64__)
#define MAP_SIZE_DIFFERS(map)   (map->max_offset < MACH_VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR    mach_msg_ool_descriptor32_t
#define OTHER_OOL_PORTS_DESCRIPTOR      mach_msg_ool_ports_descriptor32_t
#else
#define MAP_SIZE_DIFFERS(map)   (map->max_offset > VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR    mach_msg_ool_descriptor64_t
#define OTHER_OOL_PORTS_DESCRIPTOR      mach_msg_ool_ports_descriptor64_t
#endif

/* Size difference between 64-bit and 32-bit OOL descriptors. */
#define DESC_SIZE_ADJUSTMENT    ((mach_msg_size_t)(sizeof(mach_msg_ool_descriptor64_t) - \
	                         sizeof(mach_msg_ool_descriptor32_t)))
 841  
 842  /* scatter list macros */
 843  
 844  #define SKIP_PORT_DESCRIPTORS(s, c)                                     \
 845  MACRO_BEGIN                                                             \
 846  	if ((s) != MACH_MSG_DESCRIPTOR_NULL) {                          \
 847  	        while ((c) > 0) {                                       \
 848  	                if ((s)->type.type != MACH_MSG_PORT_DESCRIPTOR) \
 849  	                        break;                                  \
 850  	                (s)++; (c)--;                                   \
 851  	        }                                                       \
 852  	        if (c == 0)                                             \
 853  	                (s) = MACH_MSG_DESCRIPTOR_NULL;                 \
 854  	}                                                               \
 855  MACRO_END
 856  
/*
 * Step the scatter-list cursor (s) by one descriptor and decrement the
 * count (c).  When (d) is true the list holds OTHER_OOL_DESCRIPTOR
 * (non-native width) entries, so the stride differs from the native
 * mach_msg_descriptor_t.
 */
#define INCREMENT_SCATTER(s, c, d)                                      \
MACRO_BEGIN                                                             \
	if ((s) != MACH_MSG_DESCRIPTOR_NULL) {                          \
	    s = (d) ? (mach_msg_descriptor_t *)                         \
	        ((OTHER_OOL_DESCRIPTOR *)(s) + 1) :                     \
	        (s + 1);                                                \
	        (c)--;                                                  \
	}                                                               \
MACRO_END
 866  
/*
 * Flag bits recorded by ipc_kmsg_trace_send into the kdebug KMSG_INFO
 * tracepoint.  The flags occupy the upper bits (shifted by
 * KMSG_TRACE_FLAGS_SHIFT) and a port count occupies the low byte.
 */
#define KMSG_TRACE_FLAG_TRACED     0x000001
#define KMSG_TRACE_FLAG_COMPLEX    0x000002
#define KMSG_TRACE_FLAG_OOLMEM     0x000004
#define KMSG_TRACE_FLAG_VCPY       0x000008
#define KMSG_TRACE_FLAG_PCPY       0x000010
#define KMSG_TRACE_FLAG_SND64      0x000020
#define KMSG_TRACE_FLAG_RAISEIMP   0x000040
#define KMSG_TRACE_FLAG_APP_SRC    0x000080
#define KMSG_TRACE_FLAG_APP_DST    0x000100
#define KMSG_TRACE_FLAG_DAEMON_SRC 0x000200
#define KMSG_TRACE_FLAG_DAEMON_DST 0x000400
#define KMSG_TRACE_FLAG_DST_NDFLTQ 0x000800
#define KMSG_TRACE_FLAG_SRC_NDFLTQ 0x001000
#define KMSG_TRACE_FLAG_DST_SONCE  0x002000
#define KMSG_TRACE_FLAG_SRC_SONCE  0x004000
#define KMSG_TRACE_FLAG_CHECKIN    0x008000
#define KMSG_TRACE_FLAG_ONEWAY     0x010000
#define KMSG_TRACE_FLAG_IOKIT      0x020000
#define KMSG_TRACE_FLAG_SNDRCV     0x040000
#define KMSG_TRACE_FLAG_DSTQFULL   0x080000
#define KMSG_TRACE_FLAG_VOUCHER    0x100000
#define KMSG_TRACE_FLAG_TIMER      0x200000
#define KMSG_TRACE_FLAG_SEMA       0x400000
#define KMSG_TRACE_FLAG_DTMPOWNER  0x800000
#define KMSG_TRACE_FLAG_GUARDED_DESC 0x1000000

#define KMSG_TRACE_FLAGS_MASK      0x1ffffff
#define KMSG_TRACE_FLAGS_SHIFT     8

#define KMSG_TRACE_PORTS_MASK      0xff
#define KMSG_TRACE_PORTS_SHIFT     0
 898  
 899  #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD)
 900  #include <stdint.h>
 901  
 902  void
 903  ipc_kmsg_trace_send(ipc_kmsg_t kmsg,
 904      mach_msg_option_t option)
 905  {
 906  	task_t send_task = TASK_NULL;
 907  	ipc_port_t dst_port, src_port;
 908  	boolean_t is_task_64bit;
 909  	mach_msg_header_t *msg;
 910  	mach_msg_trailer_t *trailer;
 911  
 912  	int kotype = 0;
 913  	uint32_t msg_size = 0;
 914  	uint64_t msg_flags = KMSG_TRACE_FLAG_TRACED;
 915  	uint32_t num_ports = 0;
 916  	uint32_t send_pid, dst_pid;
 917  
 918  	/*
 919  	 * check to see not only if ktracing is enabled, but if we will
 920  	 * _actually_ emit the KMSG_INFO tracepoint. This saves us a
 921  	 * significant amount of processing (and a port lock hold) in
 922  	 * the non-tracing case.
 923  	 */
 924  	if (__probable((kdebug_enable & KDEBUG_TRACE) == 0)) {
 925  		return;
 926  	}
 927  	if (!kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO))) {
 928  		return;
 929  	}
 930  
 931  	msg = kmsg->ikm_header;
 932  
 933  	dst_port = msg->msgh_remote_port;
 934  	if (!IPC_PORT_VALID(dst_port)) {
 935  		return;
 936  	}
 937  
 938  	/*
 939  	 * Message properties / options
 940  	 */
 941  	if ((option & (MACH_SEND_MSG | MACH_RCV_MSG)) == (MACH_SEND_MSG | MACH_RCV_MSG)) {
 942  		msg_flags |= KMSG_TRACE_FLAG_SNDRCV;
 943  	}
 944  
 945  	if (msg->msgh_id >= is_iokit_subsystem.start &&
 946  	    msg->msgh_id < is_iokit_subsystem.end + 100) {
 947  		msg_flags |= KMSG_TRACE_FLAG_IOKIT;
 948  	}
 949  	/* magic XPC checkin message id (XPC_MESSAGE_ID_CHECKIN) from libxpc */
 950  	else if (msg->msgh_id == 0x77303074u /* w00t */) {
 951  		msg_flags |= KMSG_TRACE_FLAG_CHECKIN;
 952  	}
 953  
 954  	if (msg->msgh_bits & MACH_MSGH_BITS_RAISEIMP) {
 955  		msg_flags |= KMSG_TRACE_FLAG_RAISEIMP;
 956  	}
 957  
 958  	if (unsafe_convert_port_to_voucher(kmsg->ikm_voucher)) {
 959  		msg_flags |= KMSG_TRACE_FLAG_VOUCHER;
 960  	}
 961  
 962  	/*
 963  	 * Sending task / port
 964  	 */
 965  	send_task = current_task();
 966  	send_pid = task_pid(send_task);
 967  
 968  	if (send_pid != 0) {
 969  		if (task_is_daemon(send_task)) {
 970  			msg_flags |= KMSG_TRACE_FLAG_DAEMON_SRC;
 971  		} else if (task_is_app(send_task)) {
 972  			msg_flags |= KMSG_TRACE_FLAG_APP_SRC;
 973  		}
 974  	}
 975  
 976  	is_task_64bit = (send_task->map->max_offset > VM_MAX_ADDRESS);
 977  	if (is_task_64bit) {
 978  		msg_flags |= KMSG_TRACE_FLAG_SND64;
 979  	}
 980  
 981  	src_port = msg->msgh_local_port;
 982  	if (src_port) {
 983  		if (src_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) {
 984  			msg_flags |= KMSG_TRACE_FLAG_SRC_NDFLTQ;
 985  		}
 986  		switch (MACH_MSGH_BITS_LOCAL(msg->msgh_bits)) {
 987  		case MACH_MSG_TYPE_MOVE_SEND_ONCE:
 988  			msg_flags |= KMSG_TRACE_FLAG_SRC_SONCE;
 989  			break;
 990  		default:
 991  			break;
 992  		}
 993  	} else {
 994  		msg_flags |= KMSG_TRACE_FLAG_ONEWAY;
 995  	}
 996  
 997  
 998  	/*
 999  	 * Destination task / port
1000  	 */
1001  	ip_lock(dst_port);
1002  	if (!ip_active(dst_port)) {
1003  		/* dst port is being torn down */
1004  		dst_pid = (uint32_t)0xfffffff0;
1005  	} else if (dst_port->ip_tempowner) {
1006  		msg_flags |= KMSG_TRACE_FLAG_DTMPOWNER;
1007  		if (IIT_NULL != dst_port->ip_imp_task) {
1008  			dst_pid = task_pid(dst_port->ip_imp_task->iit_task);
1009  		} else {
1010  			dst_pid = (uint32_t)0xfffffff1;
1011  		}
1012  	} else if (dst_port->ip_receiver_name == MACH_PORT_NULL) {
1013  		/* dst_port is otherwise in-transit */
1014  		dst_pid = (uint32_t)0xfffffff2;
1015  	} else {
1016  		if (dst_port->ip_receiver == ipc_space_kernel) {
1017  			dst_pid = 0;
1018  		} else {
1019  			ipc_space_t dst_space;
1020  			dst_space = dst_port->ip_receiver;
1021  			if (dst_space && is_active(dst_space)) {
1022  				dst_pid = task_pid(dst_space->is_task);
1023  				if (task_is_daemon(dst_space->is_task)) {
1024  					msg_flags |= KMSG_TRACE_FLAG_DAEMON_DST;
1025  				} else if (task_is_app(dst_space->is_task)) {
1026  					msg_flags |= KMSG_TRACE_FLAG_APP_DST;
1027  				}
1028  			} else {
1029  				/* receiving task is being torn down */
1030  				dst_pid = (uint32_t)0xfffffff3;
1031  			}
1032  		}
1033  	}
1034  
1035  	if (dst_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) {
1036  		msg_flags |= KMSG_TRACE_FLAG_DST_NDFLTQ;
1037  	}
1038  	if (imq_full(&dst_port->ip_messages)) {
1039  		msg_flags |= KMSG_TRACE_FLAG_DSTQFULL;
1040  	}
1041  
1042  	kotype = ip_kotype(dst_port);
1043  
1044  	ip_unlock(dst_port);
1045  
1046  	switch (kotype) {
1047  	case IKOT_SEMAPHORE:
1048  		msg_flags |= KMSG_TRACE_FLAG_SEMA;
1049  		break;
1050  	case IKOT_TIMER:
1051  	case IKOT_CLOCK:
1052  		msg_flags |= KMSG_TRACE_FLAG_TIMER;
1053  		break;
1054  	case IKOT_MASTER_DEVICE:
1055  	case IKOT_IOKIT_CONNECT:
1056  	case IKOT_IOKIT_OBJECT:
1057  	case IKOT_IOKIT_IDENT:
1058  	case IKOT_UEXT_OBJECT:
1059  		msg_flags |= KMSG_TRACE_FLAG_IOKIT;
1060  		break;
1061  	default:
1062  		break;
1063  	}
1064  
1065  	switch (MACH_MSGH_BITS_REMOTE(msg->msgh_bits)) {
1066  	case MACH_MSG_TYPE_PORT_SEND_ONCE:
1067  		msg_flags |= KMSG_TRACE_FLAG_DST_SONCE;
1068  		break;
1069  	default:
1070  		break;
1071  	}
1072  
1073  
1074  	/*
1075  	 * Message size / content
1076  	 */
1077  	msg_size = msg->msgh_size - sizeof(mach_msg_header_t);
1078  
1079  	if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
1080  		mach_msg_body_t *msg_body;
1081  		mach_msg_descriptor_t *kern_dsc;
1082  		int dsc_count;
1083  
1084  		msg_flags |= KMSG_TRACE_FLAG_COMPLEX;
1085  
1086  		msg_body = (mach_msg_body_t *)(kmsg->ikm_header + 1);
1087  		dsc_count = (int)msg_body->msgh_descriptor_count;
1088  		kern_dsc = (mach_msg_descriptor_t *)(msg_body + 1);
1089  
1090  		/* this is gross: see ipc_kmsg_copyin_body()... */
1091  		if (!is_task_64bit) {
1092  			msg_size -= (dsc_count * 12);
1093  		}
1094  
1095  		for (int i = 0; i < dsc_count; i++) {
1096  			switch (kern_dsc[i].type.type) {
1097  			case MACH_MSG_PORT_DESCRIPTOR:
1098  				num_ports++;
1099  				if (is_task_64bit) {
1100  					msg_size -= 12;
1101  				}
1102  				break;
1103  			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
1104  			case MACH_MSG_OOL_DESCRIPTOR: {
1105  				mach_msg_ool_descriptor_t *dsc;
1106  				dsc = (mach_msg_ool_descriptor_t *)&kern_dsc[i];
1107  				msg_flags |= KMSG_TRACE_FLAG_OOLMEM;
1108  				msg_size += dsc->size;
1109  				if ((dsc->size >= MSG_OOL_SIZE_SMALL) &&
1110  				    (dsc->copy == MACH_MSG_PHYSICAL_COPY) &&
1111  				    !dsc->deallocate) {
1112  					msg_flags |= KMSG_TRACE_FLAG_PCPY;
1113  				} else if (dsc->size <= MSG_OOL_SIZE_SMALL) {
1114  					msg_flags |= KMSG_TRACE_FLAG_PCPY;
1115  				} else {
1116  					msg_flags |= KMSG_TRACE_FLAG_VCPY;
1117  				}
1118  				if (is_task_64bit) {
1119  					msg_size -= 16;
1120  				}
1121  			} break;
1122  			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
1123  				mach_msg_ool_ports_descriptor_t *dsc;
1124  				dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i];
1125  				num_ports += dsc->count;
1126  				if (is_task_64bit) {
1127  					msg_size -= 16;
1128  				}
1129  			} break;
1130  			case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
1131  				num_ports++;
1132  				msg_flags |= KMSG_TRACE_FLAG_GUARDED_DESC;
1133  				if (is_task_64bit) {
1134  					msg_size -= 16;
1135  				}
1136  				break;
1137  			default:
1138  				break;
1139  			}
1140  		}
1141  	}
1142  
1143  	/*
1144  	 * Trailer contents
1145  	 */
1146  	trailer = (mach_msg_trailer_t *)((vm_offset_t)msg +
1147  	    (vm_offset_t)mach_round_msg(msg->msgh_size));
1148  	if (trailer->msgh_trailer_size <= sizeof(mach_msg_security_trailer_t)) {
1149  		extern const security_token_t KERNEL_SECURITY_TOKEN;
1150  		mach_msg_security_trailer_t *strailer;
1151  		strailer = (mach_msg_security_trailer_t *)trailer;
1152  		/*
1153  		 * verify the sender PID: replies from the kernel often look
1154  		 * like self-talk because the sending port is not reset.
1155  		 */
1156  		if (memcmp(&strailer->msgh_sender,
1157  		    &KERNEL_SECURITY_TOKEN,
1158  		    sizeof(KERNEL_SECURITY_TOKEN)) == 0) {
1159  			send_pid = 0;
1160  			msg_flags &= ~(KMSG_TRACE_FLAG_APP_SRC | KMSG_TRACE_FLAG_DAEMON_SRC);
1161  		}
1162  	}
1163  
1164  	KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END,
1165  	    (uintptr_t)send_pid,
1166  	    (uintptr_t)dst_pid,
1167  	    (uintptr_t)msg_size,
1168  	    (uintptr_t)(
1169  		    ((msg_flags & KMSG_TRACE_FLAGS_MASK) << KMSG_TRACE_FLAGS_SHIFT) |
1170  		    ((num_ports & KMSG_TRACE_PORTS_MASK) << KMSG_TRACE_PORTS_SHIFT)
1171  		    )
1172  	    );
1173  }
1174  #endif
1175  
1176  /* zone for cached ipc_kmsg_t structures */
1177  ZONE_DECLARE(ipc_kmsg_zone, "ipc kmsgs", IKM_SAVED_KMSG_SIZE,
1178      ZC_CACHING | ZC_ZFREE_CLEARMEM);
1179  static TUNABLE(bool, enforce_strict_reply, "ipc_strict_reply", false);
1180  
1181  /*
1182   * Forward declarations
1183   */
1184  
1185  void ipc_kmsg_clean(
1186  	ipc_kmsg_t      kmsg);
1187  
1188  void ipc_kmsg_clean_body(
1189  	ipc_kmsg_t      kmsg,
1190  	mach_msg_type_number_t  number,
1191  	mach_msg_descriptor_t   *desc);
1192  
1193  void ipc_kmsg_clean_partial(
1194  	ipc_kmsg_t              kmsg,
1195  	mach_msg_type_number_t  number,
1196  	mach_msg_descriptor_t   *desc,
1197  	vm_offset_t             paddr,
1198  	vm_size_t               length);
1199  
1200  mach_msg_return_t ipc_kmsg_copyin_body(
1201  	ipc_kmsg_t          kmsg,
1202  	ipc_space_t         space,
1203  	vm_map_t            map,
1204  	mach_msg_option_t   *optionp);
1205  
1206  
1207  static void
1208  ipc_kmsg_link_reply_context_locked(
1209  	ipc_port_t reply_port,
1210  	ipc_port_t voucher_port);
1211  
1212  static kern_return_t
1213  ipc_kmsg_validate_reply_port_locked(
1214  	ipc_port_t reply_port,
1215  	mach_msg_option_t options);
1216  
1217  static mach_msg_return_t
1218  ipc_kmsg_validate_reply_context_locked(
1219  	mach_msg_option_t option,
1220  	ipc_port_t dest_port,
1221  	ipc_voucher_t voucher,
1222  	mach_port_name_t voucher_name);
1223  
1224  /* we can't include the BSD <sys/persona.h> header here... */
1225  #ifndef PERSONA_ID_NONE
1226  #define PERSONA_ID_NONE ((uint32_t)-1)
1227  #endif
1228  
1229  /*
1230   *	We keep a per-processor cache of kernel message buffers.
1231   *	The cache saves the overhead/locking of using kalloc/kfree.
1232   *	The per-processor cache seems to miss less than a per-thread cache,
1233   *	and it also uses less memory.  Access to the cache doesn't
1234   *	require locking.
1235   */
1236  
1237  /*
1238   *	Routine:	ikm_set_header
1239   *	Purpose:
1240   *		Set the header (and data) pointers for a message. If the
 *      message is small, the data pointer is NULL and all the
 *      data resides within the fixed space of the kmsg structure
 *      itself.
1244   *	Conditions:
1245   *		Nothing locked.
1246   */
static void
ikm_set_header(
	ipc_kmsg_t kmsg,
	void *data,
	mach_msg_size_t mtsize)
{
	if (data) {
		/*
		 * Separately allocated buffer: the header goes at the far
		 * end so any expansion slack sits in front of it (see the
		 * layout comment in ipc_kmsg_alloc()).
		 */
		kmsg->ikm_data = data;
		kmsg->ikm_header = (mach_msg_header_t *)(data + kmsg->ikm_size - mtsize);
	} else {
		/*
		 * Small message: the header lives in the fixed in-line
		 * space immediately following the kmsg structure itself.
		 */
		assert(kmsg->ikm_size == IKM_SAVED_MSG_SIZE);
		kmsg->ikm_header = (mach_msg_header_t *)
		    ((vm_offset_t)(kmsg + 1) + kmsg->ikm_size - mtsize);
	}
}
1262  
1263  /*
1264   *	Routine:	ipc_kmsg_alloc
1265   *	Purpose:
1266   *		Allocate a kernel message structure.  If we can get one from
1267   *		the cache, that is best.  Otherwise, allocate a new one.
1268   *	Conditions:
1269   *		Nothing locked.
1270   */
ipc_kmsg_t
ipc_kmsg_alloc(
	mach_msg_size_t msg_and_trailer_size)
{
	mach_msg_size_t max_expanded_size;
	ipc_kmsg_t kmsg;
	void *data;

	/*
	 * LP64support -
	 * Pad the allocation in case we need to expand the
	 * message descriptors for user spaces with pointers larger than
	 * the kernel's own, or vice versa.  We don't know how many descriptors
	 * there are yet, so just assume the whole body could be
	 * descriptors (if there could be any at all).
	 *
	 * The expansion space is left in front of the header,
	 * because it is easier to pull the header and descriptors
	 * forward as we process them than it is to push all the
	 * data backwards.
	 */
	mach_msg_size_t size = msg_and_trailer_size - MAX_TRAILER_SIZE;

	/* compare against implementation upper limit for the body */
	if (size > ipc_kmsg_max_body_space) {
		return IKM_NULL;
	}

	if (size > sizeof(mach_msg_base_t)) {
		/* worst case: the whole body consists of 32-bit descriptors */
		mach_msg_size_t max_desc = (mach_msg_size_t)(((size - sizeof(mach_msg_base_t)) /
		    sizeof(mach_msg_ool_descriptor32_t)) *
		    DESC_SIZE_ADJUSTMENT);

		/* make sure expansion won't cause wrap */
		if (msg_and_trailer_size > MACH_MSG_SIZE_MAX - max_desc) {
			return IKM_NULL;
		}

		max_expanded_size = msg_and_trailer_size + max_desc;
	} else {
		max_expanded_size = msg_and_trailer_size;
	}

	if (max_expanded_size > IKM_SAVED_MSG_SIZE) {
		/* too large for the in-line area: allocate a separate buffer */
		data = kheap_alloc(KHEAP_DATA_BUFFERS, max_expanded_size, Z_WAITOK);
		if (data == NULL) {
			return IKM_NULL;
		}
	} else {
		/* fits in the fixed space following the kmsg structure */
		data = NULL;
		max_expanded_size = IKM_SAVED_MSG_SIZE;
	}

	kmsg = zalloc_flags(ipc_kmsg_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	kmsg->ikm_size = max_expanded_size;
	ikm_qos_init(kmsg);
	ikm_set_header(kmsg, data, msg_and_trailer_size);
	/* debug-only poison of the queue links (assignment lives in the assert) */
	assert((kmsg->ikm_prev = kmsg->ikm_next = IKM_BOGUS));

	return kmsg;
}
1332  
1333  /*
1334   *	Routine:	ipc_kmsg_free
1335   *	Purpose:
 *		Free a kernel message buffer.  If the kmsg is preallocated
1337   *		to a port, just "put it back (marked unused)."  We have to
1338   *		do this with the port locked.  The port may have its hold
1339   *		on our message released.  In that case, we have to just
1340   *		revert the message to a traditional one and free it normally.
1341   *	Conditions:
1342   *		Nothing locked.
1343   */
1344  
void
ipc_kmsg_free(
	ipc_kmsg_t      kmsg)
{
	mach_msg_size_t size = kmsg->ikm_size;
	ipc_port_t port;

	/* the voucher reference must have been released/cleared by now */
	assert(!IP_VALID(kmsg->ikm_voucher));

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_FREE) | DBG_FUNC_NONE,
	    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
	    0, 0, 0, 0);

	/*
	 * Check to see if the message is bound to the port.  If so,
	 * mark it not in use.  If the port isn't already dead, then
	 * leave the message associated with it.  Otherwise, free it.
	 */
	if (size == IKM_SAVED_MSG_SIZE) {
		/* sanity: the header must lie within the in-line slack area */
		if ((void *)kmsg->ikm_header < (void *)(kmsg + 1) ||
		    (void *)kmsg->ikm_header >= (void *)(kmsg + 1) + IKM_SAVED_MSG_SIZE) {
			panic("ipc_kmsg_free");
		}
		port = ikm_prealloc_inuse_port(kmsg);
		if (port != IP_NULL) {
			ip_lock(port);
			ikm_prealloc_clear_inuse(kmsg, port);
			if (ip_active(port) && (port->ip_premsg == kmsg)) {
				/* port still owns the buffer: give it back, do not free */
				assert(IP_PREALLOC(port));
				ip_unlock(port);
				ip_release(port);
				return;
			}
			ip_unlock(port);
			ip_release(port); /* May be last reference */
		}
	} else {
		/* sanity: the header must lie within the separate data buffer */
		void *data = kmsg->ikm_data;
		if ((void *)kmsg->ikm_header < data ||
		    (void *)kmsg->ikm_header >= data + size) {
			panic("ipc_kmsg_free");
		}
		kheap_free(KHEAP_DATA_BUFFERS, data, size);
	}
	zfree(ipc_kmsg_zone, kmsg);
}
1391  
1392  
1393  /*
1394   *	Routine:	ipc_kmsg_enqueue
1395   *	Purpose:
1396   *		Enqueue a kmsg.
1397   */
1398  
1399  void
1400  ipc_kmsg_enqueue(
1401  	ipc_kmsg_queue_t        queue,
1402  	ipc_kmsg_t              kmsg)
1403  {
1404  	ipc_kmsg_t first = queue->ikmq_base;
1405  	ipc_kmsg_t last;
1406  
1407  	if (first == IKM_NULL) {
1408  		queue->ikmq_base = kmsg;
1409  		kmsg->ikm_next = kmsg;
1410  		kmsg->ikm_prev = kmsg;
1411  	} else {
1412  		last = first->ikm_prev;
1413  		kmsg->ikm_next = first;
1414  		kmsg->ikm_prev = last;
1415  		first->ikm_prev = kmsg;
1416  		last->ikm_next = kmsg;
1417  	}
1418  }
1419  
1420  /*
1421   *	Routine:	ipc_kmsg_enqueue_qos
1422   *	Purpose:
1423   *		Enqueue a kmsg, propagating qos
1424   *		overrides towards the head of the queue.
1425   *
1426   *	Returns:
1427   *		whether the head of the queue had
 *		its override-qos adjusted because
1429   *		of this insertion.
1430   */
1431  
1432  boolean_t
1433  ipc_kmsg_enqueue_qos(
1434  	ipc_kmsg_queue_t        queue,
1435  	ipc_kmsg_t              kmsg)
1436  {
1437  	ipc_kmsg_t first = queue->ikmq_base;
1438  	ipc_kmsg_t prev;
1439  	mach_msg_qos_t qos_ovr;
1440  
1441  	if (first == IKM_NULL) {
1442  		/* insert a first message */
1443  		queue->ikmq_base = kmsg;
1444  		kmsg->ikm_next = kmsg;
1445  		kmsg->ikm_prev = kmsg;
1446  		return TRUE;
1447  	}
1448  
1449  	/* insert at the tail */
1450  	prev = first->ikm_prev;
1451  	kmsg->ikm_next = first;
1452  	kmsg->ikm_prev = prev;
1453  	first->ikm_prev = kmsg;
1454  	prev->ikm_next = kmsg;
1455  
1456  	/* apply QoS overrides towards the head */
1457  	qos_ovr = kmsg->ikm_qos_override;
1458  	while (prev != kmsg &&
1459  	    qos_ovr > prev->ikm_qos_override) {
1460  		prev->ikm_qos_override = qos_ovr;
1461  		prev = prev->ikm_prev;
1462  	}
1463  
1464  	/* did we adjust everything? */
1465  	return prev == kmsg;
1466  }
1467  
1468  /*
1469   *	Routine:	ipc_kmsg_override_qos
1470   *	Purpose:
1471   *		Update the override for a given kmsg already
1472   *		enqueued, propagating qos override adjustments
1473   *		towards	the head of the queue.
1474   *
1475   *	Returns:
1476   *		whether the head of the queue had
 *		its override-qos adjusted because
1478   *		of this insertion.
1479   */
1480  
1481  boolean_t
1482  ipc_kmsg_override_qos(
1483  	ipc_kmsg_queue_t    queue,
1484  	ipc_kmsg_t          kmsg,
1485  	mach_msg_qos_t      qos_ovr)
1486  {
1487  	ipc_kmsg_t first = queue->ikmq_base;
1488  	ipc_kmsg_t cur = kmsg;
1489  
1490  	/* apply QoS overrides towards the head */
1491  	while (qos_ovr > cur->ikm_qos_override) {
1492  		cur->ikm_qos_override = qos_ovr;
1493  		if (cur == first) {
1494  			return TRUE;
1495  		}
1496  		cur = cur->ikm_prev;
1497  	}
1498  	return FALSE;
1499  }
1500  
1501  /*
1502   *	Routine:	ipc_kmsg_dequeue
1503   *	Purpose:
1504   *		Dequeue and return a kmsg.
1505   */
1506  
1507  ipc_kmsg_t
1508  ipc_kmsg_dequeue(
1509  	ipc_kmsg_queue_t        queue)
1510  {
1511  	ipc_kmsg_t first;
1512  
1513  	first = ipc_kmsg_queue_first(queue);
1514  
1515  	if (first != IKM_NULL) {
1516  		ipc_kmsg_rmqueue(queue, first);
1517  	}
1518  
1519  	return first;
1520  }
1521  
1522  /*
1523   *	Routine:	ipc_kmsg_rmqueue
1524   *	Purpose:
1525   *		Pull a kmsg out of a queue.
1526   */
1527  
void
ipc_kmsg_rmqueue(
	ipc_kmsg_queue_t        queue,
	ipc_kmsg_t              kmsg)
{
	ipc_kmsg_t next, prev;

	assert(queue->ikmq_base != IKM_NULL);

	next = kmsg->ikm_next;
	prev = kmsg->ikm_prev;

	if (next == kmsg) {
		/* kmsg was the only element: the queue becomes empty */
		assert(prev == kmsg);
		assert(queue->ikmq_base == kmsg);

		queue->ikmq_base = IKM_NULL;
	} else {
		/* detect corruption of the circular list before unlinking */
		if (__improbable(next->ikm_prev != kmsg || prev->ikm_next != kmsg)) {
			panic("ipc_kmsg_rmqueue: inconsistent prev/next pointers. "
			    "(prev->next: %p, next->prev: %p, kmsg: %p)",
			    prev->ikm_next, next->ikm_prev, kmsg);
		}

		/* if the head is being removed, advance it */
		if (queue->ikmq_base == kmsg) {
			queue->ikmq_base = next;
		}

		next->ikm_prev = prev;
		prev->ikm_next = next;
	}
	/* XXX Temporary debug logic */
	/* poison the links on DEBUG builds (assignment lives in the assert) */
	assert((kmsg->ikm_next = IKM_BOGUS) == IKM_BOGUS);
	assert((kmsg->ikm_prev = IKM_BOGUS) == IKM_BOGUS);
}
1563  
1564  /*
1565   *	Routine:	ipc_kmsg_queue_next
1566   *	Purpose:
1567   *		Return the kmsg following the given kmsg.
1568   *		(Or IKM_NULL if it is the last one in the queue.)
1569   */
1570  
1571  ipc_kmsg_t
1572  ipc_kmsg_queue_next(
1573  	ipc_kmsg_queue_t        queue,
1574  	ipc_kmsg_t              kmsg)
1575  {
1576  	ipc_kmsg_t next;
1577  
1578  	assert(queue->ikmq_base != IKM_NULL);
1579  
1580  	next = kmsg->ikm_next;
1581  	if (queue->ikmq_base == next) {
1582  		next = IKM_NULL;
1583  	}
1584  
1585  	return next;
1586  }
1587  
1588  /*
1589   *	Routine:	ipc_kmsg_destroy
1590   *	Purpose:
1591   *		Destroys a kernel message.  Releases all rights,
1592   *		references, and memory held by the message.
1593   *		Frees the message.
1594   *	Conditions:
1595   *		No locks held.
1596   */
1597  
1598  void
1599  ipc_kmsg_destroy(
1600  	ipc_kmsg_t      kmsg)
1601  {
1602  	/*
1603  	 *	Destroying a message can cause more messages to be destroyed.
1604  	 *	Curtail recursion by putting messages on the deferred
1605  	 *	destruction queue.  If this was the first message on the
1606  	 *	queue, this instance must process the full queue.
1607  	 */
1608  	if (ipc_kmsg_delayed_destroy(kmsg)) {
1609  		ipc_kmsg_reap_delayed();
1610  	}
1611  }
1612  
1613  /*
1614   *	Routine:	ipc_kmsg_delayed_destroy
1615   *	Purpose:
1616   *		Enqueues a kernel message for deferred destruction.
1617   *	Returns:
1618   *		Boolean indicator that the caller is responsible to reap
1619   *		deferred messages.
1620   */
1621  
1622  boolean_t
1623  ipc_kmsg_delayed_destroy(
1624  	ipc_kmsg_t kmsg)
1625  {
1626  	ipc_kmsg_queue_t queue = &(current_thread()->ith_messages);
1627  	boolean_t first = ipc_kmsg_queue_empty(queue);
1628  
1629  	ipc_kmsg_enqueue(queue, kmsg);
1630  	return first;
1631  }
1632  
1633  /*
 *	Routine:	ipc_kmsg_reap_delayed
1635   *	Purpose:
1636   *		Destroys messages from the per-thread
1637   *		deferred reaping queue.
1638   *	Conditions:
1639   *		No locks held.
1640   */
1641  
void
ipc_kmsg_reap_delayed(void)
{
	ipc_kmsg_queue_t queue = &(current_thread()->ith_messages);
	ipc_kmsg_t kmsg;

	/*
	 * must leave kmsg in queue while cleaning it to assure
	 * no nested calls recurse into here.
	 */
	while ((kmsg = ipc_kmsg_queue_first(queue)) != IKM_NULL) {
		/* cleaning may enqueue more messages; the loop picks them up */
		ipc_kmsg_clean(kmsg);
		ipc_kmsg_rmqueue(queue, kmsg);
		ipc_kmsg_free(kmsg);
	}
}
1658  
1659  /*
1660   *	Routine:	ipc_kmsg_clean_body
1661   *	Purpose:
1662   *		Cleans the body of a kernel message.
1663   *		Releases all rights, references, and memory.
1664   *
1665   *	Conditions:
1666   *		No locks held.
1667   */
1668  static unsigned int _ipc_kmsg_clean_invalid_desc = 0;
void
ipc_kmsg_clean_body(
	__unused ipc_kmsg_t     kmsg,
	mach_msg_type_number_t  number,
	mach_msg_descriptor_t   *saddr)
{
	mach_msg_type_number_t      i;

	if (number == 0) {
		return;
	}

	/* release the rights/memory held by each descriptor in turn */
	for (i = 0; i < number; i++, saddr++) {
		switch (saddr->type.type) {
		case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_port_descriptor_t *dsc;

			dsc = &saddr->port;

			/*
			 * Destroy port rights carried in the message
			 */
			if (!IP_VALID(dsc->name)) {
				/* continues the for loop (equivalent to break here) */
				continue;
			}
			ipc_object_destroy(ip_to_object(dsc->name), dsc->disposition);
			break;
		}
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR: {
			mach_msg_ool_descriptor_t *dsc;

			dsc = (mach_msg_ool_descriptor_t *)&saddr->out_of_line;

			/*
			 * Destroy memory carried in the message
			 */
			if (dsc->size == 0) {
				assert(dsc->address == (void *) 0);
			} else {
				/* address holds a vm_map_copy_t while in kernel transit */
				vm_map_copy_discard((vm_map_copy_t) dsc->address);
			}
			break;
		}
		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			ipc_object_t                    *objects;
			mach_msg_type_number_t          j;
			mach_msg_ool_ports_descriptor_t *dsc;

			dsc = (mach_msg_ool_ports_descriptor_t  *)&saddr->ool_ports;
			objects = (ipc_object_t *) dsc->address;

			if (dsc->count == 0) {
				break;
			}

			assert(objects != (ipc_object_t *) 0);

			/* destroy port rights carried in the message */

			for (j = 0; j < dsc->count; j++) {
				ipc_object_t object = objects[j];

				if (!IO_VALID(object)) {
					continue;
				}

				ipc_object_destroy(object, dsc->disposition);
			}

			/* destroy memory carried in the message */

			assert(dsc->count != 0);

			/* the port array was kalloc'ed at count * sizeof(mach_port_t) */
			kfree(dsc->address,
			    (vm_size_t) dsc->count * sizeof(mach_port_t));
			break;
		}
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
			mach_msg_guarded_port_descriptor_t *dsc = (typeof(dsc)) & saddr->guarded_port;

			/*
			 * Destroy port rights carried in the message
			 */
			if (!IP_VALID(dsc->name)) {
				continue;
			}
			ipc_object_destroy(ip_to_object(dsc->name), dsc->disposition);
			break;
		}
		default: {
			_ipc_kmsg_clean_invalid_desc++;         /* don't understand this type of descriptor */
		}
		}
	}
}
1765  
1766  /*
1767   *	Routine:	ipc_kmsg_clean_partial
1768   *	Purpose:
1769   *		Cleans a partially-acquired kernel message.
1770   *		number is the index of the type descriptor
1771   *		in the body of the message that contained the error.
1772   *		If dolast, the memory and port rights in this last
1773   *		type spec are also cleaned.  In that case, number
1774   *		specifies the number of port rights to clean.
1775   *	Conditions:
1776   *		Nothing locked.
1777   */
1778  
void
ipc_kmsg_clean_partial(
	ipc_kmsg_t              kmsg,
	mach_msg_type_number_t  number,
	mach_msg_descriptor_t   *desc,
	vm_offset_t             paddr,
	vm_size_t               length)
{
	ipc_object_t object;
	mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;

	/* deal with importance chain while we still have dest and voucher references */
	ipc_importance_clean(kmsg);

	/* the destination of a partially-copied-in message must be valid */
	object = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	assert(IO_VALID(object));
	ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits));

	/* reply port, if any */
	object = ip_to_object(kmsg->ikm_header->msgh_local_port);
	if (IO_VALID(object)) {
		ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
	}

	/* voucher, if any; clear the field so it is not released twice */
	object = ip_to_object(kmsg->ikm_voucher);
	if (IO_VALID(object)) {
		assert(MACH_MSGH_BITS_VOUCHER(mbits) == MACH_MSG_TYPE_MOVE_SEND);
		ipc_object_destroy(object, MACH_MSG_TYPE_PORT_SEND);
		kmsg->ikm_voucher = IP_NULL;
	}

	/* out-of-line memory staged in the kernel copy map, if any */
	if (paddr) {
		(void) vm_deallocate(ipc_kernel_copy_map, paddr, length);
	}

	/* release the first `number` descriptors that were copied in */
	ipc_kmsg_clean_body(kmsg, number, desc);
}
1815  
1816  /*
1817   *	Routine:	ipc_kmsg_clean
1818   *	Purpose:
1819   *		Cleans a kernel message.  Releases all rights,
1820   *		references, and memory held by the message.
1821   *	Conditions:
1822   *		No locks held.
1823   */
1824  
void
ipc_kmsg_clean(
	ipc_kmsg_t      kmsg)
{
	ipc_object_t object;
	mach_msg_bits_t mbits;

	/* deal with importance chain while we still have dest and voucher references */
	ipc_importance_clean(kmsg);

	mbits = kmsg->ikm_header->msgh_bits;

	/* destination port, if any */
	object = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	if (IO_VALID(object)) {
		ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits));
	}

	/* reply port, if any */
	object = ip_to_object(kmsg->ikm_header->msgh_local_port);
	if (IO_VALID(object)) {
		ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
	}

	/* voucher, if any; clear the field so it is not released twice */
	object = ip_to_object(kmsg->ikm_voucher);
	if (IO_VALID(object)) {
		assert(MACH_MSGH_BITS_VOUCHER(mbits) == MACH_MSG_TYPE_MOVE_SEND);
		ipc_object_destroy(object, MACH_MSG_TYPE_PORT_SEND);
		kmsg->ikm_voucher = IP_NULL;
	}

	/* complex messages also carry rights/memory in their descriptors */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *body;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
		    (mach_msg_descriptor_t *)(body + 1));
	}
}
1861  
1862  /*
1863   *	Routine:	ipc_kmsg_set_prealloc
1864   *	Purpose:
1865   *		Assign a kmsg as a preallocated message buffer to a port.
1866   *	Conditions:
1867   *		port locked.
1868   */
1869  
void
ipc_kmsg_set_prealloc(
	ipc_kmsg_t              kmsg,
	ipc_port_t              port)
{
	assert(kmsg->ikm_prealloc == IP_NULL);

	/*
	 * NOTE(review): this assignment looks redundant given the assert
	 * above; presumably ikm_prealloc shares storage with another
	 * field and must be cleared explicitly — confirm against the
	 * ipc_kmsg structure definition.
	 */
	kmsg->ikm_prealloc = IP_NULL;

	assert(port_send_turnstile(port) == TURNSTILE_NULL);
	kmsg->ikm_turnstile = TURNSTILE_NULL;
	/* bind the kmsg to the port as its preallocated message buffer */
	IP_SET_PREALLOC(port, kmsg);
}
1883  
1884  /*
1885   *	Routine:	ipc_kmsg_clear_prealloc
1886   *	Purpose:
1887   *		Release the Assignment of a preallocated message buffer from a port.
1888   *	Conditions:
1889   *		port locked.
1890   */
void
ipc_kmsg_clear_prealloc(
	ipc_kmsg_t              kmsg,
	ipc_port_t              port)
{
	/* take the mqueue lock since the turnstile is protected under it */
	imq_lock(&port->ip_messages);

	IP_CLEAR_PREALLOC(port, kmsg);
	/* hand the turnstile stashed in the kmsg back to the port */
	set_port_send_turnstile(port, kmsg->ikm_turnstile);
	imq_unlock(&port->ip_messages);
}
1903  
1904  /*
1905   *	Routine:	ipc_kmsg_prealloc
1906   *	Purpose:
 *		Wrapper to ipc_kmsg_alloc() to account for
1908   *		header expansion requirements.
1909   */
ipc_kmsg_t
ipc_kmsg_prealloc(mach_msg_size_t size)
{
#if defined(__LP64__)
	/*
	 * Account for header expansion on LP64.  The comparison is
	 * written with a subtraction on the right so that it cannot
	 * overflow the unsigned size argument.
	 */
	if (size > IKM_SAVED_MSG_SIZE - LEGACY_HEADER_SIZE_DELTA) {
		panic("ipc_kmsg_prealloc");
	}

	size += LEGACY_HEADER_SIZE_DELTA;
#endif
	return ipc_kmsg_alloc(size);
}
1922  
1923  
1924  /*
1925   *	Routine:	ipc_kmsg_get
1926   *	Purpose:
1927   *		Allocates a kernel message buffer.
1928   *		Copies a user message to the message buffer.
1929   *	Conditions:
1930   *		Nothing locked.
1931   *	Returns:
1932   *		MACH_MSG_SUCCESS	Acquired a message buffer.
1933   *		MACH_SEND_MSG_TOO_SMALL	Message smaller than a header.
1934   *		MACH_SEND_MSG_TOO_SMALL	Message size not long-word multiple.
1935   *		MACH_SEND_TOO_LARGE	Message too large to ever be sent.
1936   *		MACH_SEND_NO_BUFFER	Couldn't allocate a message buffer.
1937   *		MACH_SEND_INVALID_DATA	Couldn't copy message data.
1938   */
1939  
mach_msg_return_t
ipc_kmsg_get(
	mach_vm_address_t       msg_addr,
	mach_msg_size_t size,
	ipc_kmsg_t              *kmsgp)
{
	mach_msg_size_t                 msg_and_trailer_size;
	ipc_kmsg_t                      kmsg;
	mach_msg_max_trailer_t          *trailer;
	mach_msg_legacy_base_t      legacy_base;
	mach_msg_size_t             len_copied;
	legacy_base.body.msgh_descriptor_count = 0;

	/* user size must cover at least a legacy header and be word-aligned */
	if ((size < sizeof(mach_msg_legacy_header_t)) || (size & 3)) {
		return MACH_SEND_MSG_TOO_SMALL;
	}

	if (size > ipc_kmsg_max_body_space) {
		return MACH_SEND_TOO_LARGE;
	}

	/* copy in the header alone, or header + descriptor count if present */
	if (size == sizeof(mach_msg_legacy_header_t)) {
		len_copied = sizeof(mach_msg_legacy_header_t);
	} else {
		len_copied = sizeof(mach_msg_legacy_base_t);
	}

	if (copyinmsg(msg_addr, (char *)&legacy_base, len_copied)) {
		return MACH_SEND_INVALID_DATA;
	}

	/*
	 * If the message claims to be complex, it must at least
	 * have the length of a "base" message (header + dsc_count).
	 */
	if (len_copied < sizeof(mach_msg_legacy_base_t) &&
	    (legacy_base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
		return MACH_SEND_MSG_TOO_SMALL;
	}

	/* the header is already captured; the body is copied in below */
	msg_addr += sizeof(legacy_base.header);
#if defined(__LP64__)
	/* kernel-form header is larger than the user legacy header */
	size += LEGACY_HEADER_SIZE_DELTA;
#endif
	/* unreachable if !DEBUG */
	__unreachable_ok_push
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
		unsigned int j;
		for (j = 0; j < sizeof(legacy_base.header); j++) {
			kprintf("%02x\n", ((unsigned char*)&legacy_base.header)[j]);
		}
	}
	__unreachable_ok_pop

	    msg_and_trailer_size = size + MAX_TRAILER_SIZE;
	kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
	if (kmsg == IKM_NULL) {
		return MACH_SEND_NO_BUFFER;
	}

	/* rebuild the header in kernel form (names become port pointers later) */
	kmsg->ikm_header->msgh_size                     = size;
	kmsg->ikm_header->msgh_bits                     = legacy_base.header.msgh_bits;
	kmsg->ikm_header->msgh_remote_port      = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_remote_port);
	kmsg->ikm_header->msgh_local_port       = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_local_port);
	kmsg->ikm_header->msgh_voucher_port             = legacy_base.header.msgh_voucher_port;
	kmsg->ikm_header->msgh_id                       = legacy_base.header.msgh_id;

	DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_get header:\n"
	    "  size:		0x%.8x\n"
	    "  bits:		0x%.8x\n"
	    "  remote_port:	%p\n"
	    "  local_port:	%p\n"
	    "  voucher_port:	0x%.8x\n"
	    "  id:		%.8d\n",
	    kmsg->ikm_header->msgh_size,
	    kmsg->ikm_header->msgh_bits,
	    kmsg->ikm_header->msgh_remote_port,
	    kmsg->ikm_header->msgh_local_port,
	    kmsg->ikm_header->msgh_voucher_port,
	    kmsg->ikm_header->msgh_id);

	/* copy in the message body right after the (kernel-form) header */
	if (copyinmsg(msg_addr, (char *)(kmsg->ikm_header + 1), size - (mach_msg_size_t)sizeof(mach_msg_header_t))) {
		ipc_kmsg_free(kmsg);
		return MACH_SEND_INVALID_DATA;
	}

	/* unreachable if !DEBUG */
	__unreachable_ok_push
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
		kprintf("body: size: %lu\n", (size - sizeof(mach_msg_header_t)));
		uint32_t i;
		for (i = 0; i * 4 < (size - sizeof(mach_msg_header_t)); i++) {
			kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]);
		}
	}
	__unreachable_ok_pop
	DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_get()");

	/*
	 * I reserve for the trailer the largest space (MAX_TRAILER_SIZE)
	 * However, the internal size field of the trailer (msgh_trailer_size)
	 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize
	 * the cases where no implicit data is requested.
	 */
	trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + size);
	bzero(trailer, sizeof(*trailer));
	/* stamp the sender's security/audit identity into the trailer */
	trailer->msgh_sender = current_thread()->task->sec_token;
	trailer->msgh_audit = current_thread()->task->audit_token;
	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

#ifdef ppc
	if (trcWork.traceMask) {
		dbgTrace(0x1100, (unsigned int)kmsg->ikm_header->msgh_id,
		    (unsigned int)kmsg->ikm_header->msgh_remote_port,
		    (unsigned int)kmsg->ikm_header->msgh_local_port, 0);
	}
#endif

	trailer->msgh_labels.sender = 0;
	*kmsgp = kmsg;
	return MACH_MSG_SUCCESS;
}
2063  
2064  /*
2065   *	Routine:	ipc_kmsg_get_from_kernel
2066   *	Purpose:
2067   *		First checks for a preallocated message
2068   *		reserved for kernel clients.  If not found -
2069   *		allocates a new kernel message buffer.
2070   *		Copies a kernel message to the message buffer.
2071   *		Only resource errors are allowed.
2072   *	Conditions:
2073   *		Nothing locked.
2074   *		Ports in header are ipc_port_t.
2075   *	Returns:
2076   *		MACH_MSG_SUCCESS	Acquired a message buffer.
2077   *		MACH_SEND_NO_BUFFER	Couldn't allocate a message buffer.
2078   */
2079  
mach_msg_return_t
ipc_kmsg_get_from_kernel(
	mach_msg_header_t       *msg,
	mach_msg_size_t size,
	ipc_kmsg_t              *kmsgp)
{
	ipc_kmsg_t      kmsg;
	mach_msg_size_t msg_and_trailer_size;
	mach_msg_max_trailer_t *trailer;
	ipc_port_t      dest_port;

	/* callers must supply at least a whole, 4-byte-aligned header */
	assert(size >= sizeof(mach_msg_header_t));
	assert((size & 3) == 0);

	/* ports in a kernel message header are already ipc_port_t pointers */
	dest_port = msg->msgh_remote_port;

	/* always reserve room for the largest possible trailer */
	msg_and_trailer_size = size + MAX_TRAILER_SIZE;

	/*
	 * See if the port has a pre-allocated kmsg for kernel
	 * clients.  These are set up for those kernel clients
	 * which cannot afford to wait.
	 */
	if (IP_VALID(dest_port) && IP_PREALLOC(dest_port)) {
		mach_msg_size_t max_desc = 0;

		ip_lock(dest_port);
		if (!ip_active(dest_port)) {
			/* dead destination: report as "no buffer" per contract */
			ip_unlock(dest_port);
			return MACH_SEND_NO_BUFFER;
		}
		assert(IP_PREALLOC(dest_port));
		kmsg = dest_port->ip_premsg;
		if (ikm_prealloc_inuse(kmsg)) {
			/* the single preallocated buffer is already in flight */
			ip_unlock(dest_port);
			return MACH_SEND_NO_BUFFER;
		}
#if !defined(__LP64__)
		/*
		 * On ILP32 kernels, reserve slack for descriptor expansion:
		 * each descriptor can grow by DESC_SIZE_ADJUSTMENT when the
		 * message is processed (assumed from the sizing math here —
		 * see DESC_SIZE_ADJUSTMENT's definition for the exact rule).
		 */
		if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
			assert(size > sizeof(mach_msg_base_t));
			max_desc = ((mach_msg_base_t *)msg)->body.msgh_descriptor_count *
			    DESC_SIZE_ADJUSTMENT;
		}
#endif
		if (msg_and_trailer_size > kmsg->ikm_size - max_desc) {
			/* message does not fit in the preallocated buffer */
			ip_unlock(dest_port);
			return MACH_SEND_TOO_LARGE;
		}
		/* claim the buffer while still holding the port lock */
		ikm_prealloc_set_inuse(kmsg, dest_port);
		ikm_set_header(kmsg, NULL, msg_and_trailer_size);
		ip_unlock(dest_port);
	} else {
		kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
		if (kmsg == IKM_NULL) {
			return MACH_SEND_NO_BUFFER;
		}
	}

	(void) memcpy((void *) kmsg->ikm_header, (const void *) msg, size);

	ikm_qos_init(kmsg);

	kmsg->ikm_header->msgh_size = size;

	/*
	 * I reserve for the trailer the largest space (MAX_TRAILER_SIZE)
	 * However, the internal size field of the trailer (msgh_trailer_size)
	 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to
	 * optimize the cases where no implicit data is requested.
	 */
	trailer = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header + size);
	bzero(trailer, sizeof(*trailer));
	/* kernel-originated messages carry the kernel's security identity */
	trailer->msgh_sender = KERNEL_SECURITY_TOKEN;
	trailer->msgh_audit = KERNEL_AUDIT_TOKEN;
	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

	trailer->msgh_labels.sender = 0;

	*kmsgp = kmsg;
	return MACH_MSG_SUCCESS;
}
2163  
2164  /*
2165   *	Routine:	ipc_kmsg_send
2166   *	Purpose:
2167   *		Send a message.  The message holds a reference
2168   *		for the destination port in the msgh_remote_port field.
2169   *
2170   *		If unsuccessful, the caller still has possession of
2171   *		the message and must do something with it.  If successful,
2172   *		the message is queued, given to a receiver, destroyed,
2173   *		or handled directly by the kernel via mach_msg.
2174   *	Conditions:
2175   *		Nothing locked.
2176   *	Returns:
2177   *		MACH_MSG_SUCCESS	The message was accepted.
2178   *		MACH_SEND_TIMED_OUT	Caller still has message.
2179   *		MACH_SEND_INTERRUPTED	Caller still has message.
2180   *		MACH_SEND_INVALID_DEST	Caller still has message.
2181   */
2182  mach_msg_return_t
2183  ipc_kmsg_send(
2184  	ipc_kmsg_t              kmsg,
2185  	mach_msg_option_t       option,
2186  	mach_msg_timeout_t      send_timeout)
2187  {
2188  	ipc_port_t port;
2189  	thread_t th = current_thread();
2190  	mach_msg_return_t error = MACH_MSG_SUCCESS;
2191  	boolean_t kernel_reply = FALSE;
2192  
2193  	/* Check if honor qlimit flag is set on thread. */
2194  	if ((th->options & TH_OPT_HONOR_QLIMIT) == TH_OPT_HONOR_QLIMIT) {
2195  		/* Remove the MACH_SEND_ALWAYS flag to honor queue limit. */
2196  		option &= (~MACH_SEND_ALWAYS);
2197  		/* Add the timeout flag since the message queue might be full. */
2198  		option |= MACH_SEND_TIMEOUT;
2199  		th->options &= (~TH_OPT_HONOR_QLIMIT);
2200  	}
2201  
2202  #if IMPORTANCE_INHERITANCE
2203  	bool did_importance = false;
2204  #if IMPORTANCE_TRACE
2205  	mach_msg_id_t imp_msgh_id = -1;
2206  	int           sender_pid  = -1;
2207  #endif /* IMPORTANCE_TRACE */
2208  #endif /* IMPORTANCE_INHERITANCE */
2209  
2210  	/* don't allow the creation of a circular loop */
2211  	if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
2212  		ipc_kmsg_destroy(kmsg);
2213  		KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_MSGH_BITS_CIRCULAR);
2214  		return MACH_MSG_SUCCESS;
2215  	}
2216  
2217  	ipc_voucher_send_preprocessing(kmsg);
2218  
2219  	port = kmsg->ikm_header->msgh_remote_port;
2220  	assert(IP_VALID(port));
2221  	ip_lock(port);
2222  
2223  	/*
2224  	 * If the destination has been guarded with a reply context, and the
2225  	 * sender is consuming a send-once right, then assume this is a reply
2226  	 * to an RPC and we need to validate that this sender is currently in
2227  	 * the correct context.
2228  	 */
2229  	if (enforce_strict_reply && port->ip_reply_context != 0 &&
2230  	    ((option & MACH_SEND_KERNEL) == 0) &&
2231  	    MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
2232  		error = ipc_kmsg_validate_reply_context_locked(option, port, th->ith_voucher, th->ith_voucher_name);
2233  		if (error != MACH_MSG_SUCCESS) {
2234  			ip_unlock(port);
2235  			return error;
2236  		}
2237  	}
2238  
2239  #if IMPORTANCE_INHERITANCE
2240  retry:
2241  #endif /* IMPORTANCE_INHERITANCE */
2242  	/*
2243  	 *	Can't deliver to a dead port.
2244  	 *	However, we can pretend it got sent
2245  	 *	and was then immediately destroyed.
2246  	 */
2247  	if (!ip_active(port)) {
2248  		ip_unlock(port);
2249  #if MACH_FLIPC
2250  		if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
2251  			flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
2252  		}
2253  #endif
2254  		if (did_importance) {
2255  			/*
2256  			 * We're going to pretend we delivered this message
2257  			 * successfully, and just eat the kmsg. However, the
2258  			 * kmsg is actually visible via the importance_task!
2259  			 * We need to cleanup this linkage before we destroy
2260  			 * the message, and more importantly before we set the
2261  			 * msgh_remote_port to NULL. See: 34302571
2262  			 */
2263  			ipc_importance_clean(kmsg);
2264  		}
2265  		ip_release(port);  /* JMM - Future: release right, not just ref */
2266  		kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
2267  		ipc_kmsg_destroy(kmsg);
2268  		KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST);
2269  		return MACH_MSG_SUCCESS;
2270  	}
2271  
2272  	if (port->ip_receiver == ipc_space_kernel) {
2273  		/*
2274  		 *	We can check ip_receiver == ipc_space_kernel
2275  		 *	before checking that the port is active because
2276  		 *	ipc_port_dealloc_kernel clears ip_receiver
2277  		 *	before destroying a kernel port.
2278  		 */
2279  		require_ip_active(port);
2280  		port->ip_messages.imq_seqno++;
2281  		ip_unlock(port);
2282  
2283  		current_task()->messages_sent++;
2284  
2285  		/*
2286  		 * Call the server routine, and get the reply message to send.
2287  		 */
2288  		kmsg = ipc_kobject_server(kmsg, option);
2289  		if (kmsg == IKM_NULL) {
2290  			return MACH_MSG_SUCCESS;
2291  		}
2292  
2293  		/* sign the reply message */
2294  		ikm_sign(kmsg);
2295  
2296  		/* restart the KMSG_INFO tracing for the reply message */
2297  		KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START);
2298  		port = kmsg->ikm_header->msgh_remote_port;
2299  		assert(IP_VALID(port));
2300  		ip_lock(port);
2301  		/* fall thru with reply - same options */
2302  		kernel_reply = TRUE;
2303  		if (!ip_active(port)) {
2304  			error = MACH_SEND_INVALID_DEST;
2305  		}
2306  	}
2307  
2308  #if IMPORTANCE_INHERITANCE
2309  	/*
2310  	 * Need to see if this message needs importance donation and/or
2311  	 * propagation.  That routine can drop the port lock temporarily.
2312  	 * If it does we'll have to revalidate the destination.
2313  	 */
2314  	if (!did_importance) {
2315  		did_importance = true;
2316  		if (ipc_importance_send(kmsg, option)) {
2317  			goto retry;
2318  		}
2319  	}
2320  #endif /* IMPORTANCE_INHERITANCE */
2321  
2322  	if (error != MACH_MSG_SUCCESS) {
2323  		ip_unlock(port);
2324  	} else {
2325  		/*
2326  		 * We have a valid message and a valid reference on the port.
2327  		 * we can unlock the port and call mqueue_send() on its message
2328  		 * queue. Lock message queue while port is locked.
2329  		 */
2330  		imq_lock(&port->ip_messages);
2331  
2332  		ipc_special_reply_port_msg_sent(port);
2333  
2334  		ip_unlock(port);
2335  
2336  		error = ipc_mqueue_send(&port->ip_messages, kmsg, option,
2337  		    send_timeout);
2338  	}
2339  
2340  #if IMPORTANCE_INHERITANCE
2341  	if (did_importance) {
2342  		__unused int importance_cleared = 0;
2343  		switch (error) {
2344  		case MACH_SEND_TIMED_OUT:
2345  		case MACH_SEND_NO_BUFFER:
2346  		case MACH_SEND_INTERRUPTED:
2347  		case MACH_SEND_INVALID_DEST:
2348  			/*
2349  			 * We still have the kmsg and its
2350  			 * reference on the port.  But we
2351  			 * have to back out the importance
2352  			 * boost.
2353  			 *
2354  			 * The port could have changed hands,
2355  			 * be inflight to another destination,
2356  			 * etc...  But in those cases our
2357  			 * back-out will find the new owner
2358  			 * (and all the operations that
2359  			 * transferred the right should have
2360  			 * applied their own boost adjustments
2361  			 * to the old owner(s)).
2362  			 */
2363  			importance_cleared = 1;
2364  			ipc_importance_clean(kmsg);
2365  			break;
2366  
2367  		case MACH_MSG_SUCCESS:
2368  		default:
2369  			break;
2370  		}
2371  #if IMPORTANCE_TRACE
2372  		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_END,
2373  		    task_pid(current_task()), sender_pid, imp_msgh_id, importance_cleared, 0);
2374  #endif /* IMPORTANCE_TRACE */
2375  	}
2376  #endif /* IMPORTANCE_INHERITANCE */
2377  
2378  	/*
2379  	 * If the port has been destroyed while we wait, treat the message
2380  	 * as a successful delivery (like we do for an inactive port).
2381  	 */
2382  	if (error == MACH_SEND_INVALID_DEST) {
2383  #if MACH_FLIPC
2384  		if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
2385  			flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
2386  		}
2387  #endif
2388  		ip_release(port); /* JMM - Future: release right, not just ref */
2389  		kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
2390  		ipc_kmsg_destroy(kmsg);
2391  		KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST);
2392  		return MACH_MSG_SUCCESS;
2393  	}
2394  
2395  	if (error != MACH_MSG_SUCCESS && kernel_reply) {
2396  		/*
2397  		 * Kernel reply messages that fail can't be allowed to
2398  		 * pseudo-receive on error conditions. We need to just treat
2399  		 * the message as a successful delivery.
2400  		 */
2401  #if MACH_FLIPC
2402  		if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
2403  			flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
2404  		}
2405  #endif
2406  		ip_release(port); /* JMM - Future: release right, not just ref */
2407  		kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
2408  		ipc_kmsg_destroy(kmsg);
2409  		KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, error);
2410  		return MACH_MSG_SUCCESS;
2411  	}
2412  	return error;
2413  }
2414  
2415  /*
2416   *	Routine:	ipc_kmsg_put
2417   *	Purpose:
2418   *		Copies a message buffer to a user message.
2419   *		Copies only the specified number of bytes.
2420   *		Frees the message buffer.
2421   *	Conditions:
2422   *		Nothing locked.  The message buffer must have clean
2423   *		header fields.
2424   *	Returns:
2425   *		MACH_MSG_SUCCESS	Copied data out of message buffer.
2426   *		MACH_RCV_INVALID_DATA	Couldn't copy to user message.
2427   */
2428  
mach_msg_return_t
ipc_kmsg_put(
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       option,
	mach_vm_address_t       rcv_addr,
	mach_msg_size_t         rcv_size,
	mach_msg_size_t         trailer_size,
	mach_msg_size_t         *sizep)
{
	/* total bytes to copy out: message body plus requested trailer */
	mach_msg_size_t size = kmsg->ikm_header->msgh_size + trailer_size;
	mach_msg_return_t mr;

	DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_put()");


	DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_put header:\n"
	    "  size:		0x%.8x\n"
	    "  bits:		0x%.8x\n"
	    "  remote_port:	%p\n"
	    "  local_port:	%p\n"
	    "  voucher_port:	0x%.8x\n"
	    "  id:		%.8d\n",
	    kmsg->ikm_header->msgh_size,
	    kmsg->ikm_header->msgh_bits,
	    kmsg->ikm_header->msgh_remote_port,
	    kmsg->ikm_header->msgh_local_port,
	    kmsg->ikm_header->msgh_voucher_port,
	    kmsg->ikm_header->msgh_id);

#if defined(__LP64__)
	if (current_task() != kernel_task) { /* don't if receiver expects fully-cooked in-kernel msg; */
		/*
		 * Convert the in-kernel header (64-bit port pointers) to the
		 * smaller user-visible legacy layout (32-bit port names) in
		 * place.  The legacy header is written LEGACY_HEADER_SIZE_DELTA
		 * bytes into the buffer so that it ends exactly where the
		 * original header ended, keeping the body contiguous.
		 */
		mach_msg_legacy_header_t *legacy_header =
		    (mach_msg_legacy_header_t *)((vm_offset_t)(kmsg->ikm_header) + LEGACY_HEADER_SIZE_DELTA);

		/* snapshot all fields before overwriting the overlapping region */
		mach_msg_bits_t         bits            = kmsg->ikm_header->msgh_bits;
		mach_msg_size_t         msg_size        = kmsg->ikm_header->msgh_size;
		mach_port_name_t        remote_port     = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);
		mach_port_name_t        local_port      = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_local_port);
		mach_port_name_t        voucher_port    = kmsg->ikm_header->msgh_voucher_port;
		mach_msg_id_t           id                      = kmsg->ikm_header->msgh_id;

		/* write fields back-to-front since legacy_header overlaps ikm_header */
		legacy_header->msgh_id                  = id;
		legacy_header->msgh_local_port = local_port;
		legacy_header->msgh_remote_port = remote_port;
		legacy_header->msgh_voucher_port = voucher_port;
		legacy_header->msgh_size                = msg_size - LEGACY_HEADER_SIZE_DELTA;
		legacy_header->msgh_bits                = bits;

		size -= LEGACY_HEADER_SIZE_DELTA;
		kmsg->ikm_header = (mach_msg_header_t *)legacy_header;
	}
#endif

	/* unreachable if !DEBUG */
	__unreachable_ok_push
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
		kprintf("ipc_kmsg_put header+body: %d\n", (size));
		uint32_t i;
		for (i = 0; i * 4 < size; i++) {
			kprintf("%.4x\n", ((uint32_t *)kmsg->ikm_header)[i]);
		}
		kprintf("type: %d\n", ((mach_msg_type_descriptor_t *)(((mach_msg_base_t *)kmsg->ikm_header) + 1))->type);
	}
	__unreachable_ok_pop

	/* Re-Compute target address if using stack-style delivery */
	if (option & MACH_RCV_STACK) {
		/* align the end of the message with the end of the buffer */
		rcv_addr += rcv_size - size;
	}

	if (copyoutmsg((const char *) kmsg->ikm_header, rcv_addr, size)) {
		mr = MACH_RCV_INVALID_DATA;
		size = 0;
	} else {
		mr = MACH_MSG_SUCCESS;
	}

	/* trace the copyout; kernel-range destinations are logged as 0 */
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE,
	    (rcv_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ||
	    rcv_addr + size >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) ? (uintptr_t)0 : (uintptr_t)rcv_addr,
	    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
	    1 /* this is on the receive/copyout path */,
	    0,
	    0);
	ipc_kmsg_free(kmsg);

	if (sizep) {
		*sizep = size;
	}
	return mr;
}
2520  
2521  /*
2522   *	Routine:	ipc_kmsg_put_to_kernel
2523   *	Purpose:
2524   *		Copies a message buffer to a kernel message.
2525   *		Frees the message buffer.
2526   *		No errors allowed.
2527   *	Conditions:
2528   *		Nothing locked.
2529   */
2530  
2531  void
2532  ipc_kmsg_put_to_kernel(
2533  	mach_msg_header_t       *msg,
2534  	ipc_kmsg_t              kmsg,
2535  	mach_msg_size_t         size)
2536  {
2537  	(void) memcpy((void *) msg, (const void *) kmsg->ikm_header, size);
2538  
2539  	ipc_kmsg_free(kmsg);
2540  }
2541  
2542  static pthread_priority_compact_t
2543  ipc_get_current_thread_priority(void)
2544  {
2545  	thread_t thread = current_thread();
2546  	thread_qos_t qos;
2547  	int relpri;
2548  
2549  	qos = thread_get_requested_qos(thread, &relpri);
2550  	if (!qos) {
2551  		qos = thread_user_promotion_qos_for_pri(thread->base_pri);
2552  		relpri = 0;
2553  	}
2554  	return _pthread_priority_make_from_thread_qos(qos, relpri, 0);
2555  }
2556  
2557  static kern_return_t
2558  ipc_kmsg_set_qos(
2559  	ipc_kmsg_t kmsg,
2560  	mach_msg_option_t options,
2561  	mach_msg_priority_t priority)
2562  {
2563  	kern_return_t kr;
2564  	ipc_port_t special_reply_port = kmsg->ikm_header->msgh_local_port;
2565  	ipc_port_t dest_port = kmsg->ikm_header->msgh_remote_port;
2566  
2567  	if ((options & MACH_SEND_OVERRIDE) &&
2568  	    !mach_msg_priority_is_pthread_priority(priority)) {
2569  		mach_msg_qos_t qos = mach_msg_priority_qos(priority);
2570  		int relpri = mach_msg_priority_relpri(priority);
2571  		mach_msg_qos_t ovr = mach_msg_priority_overide_qos(priority);
2572  
2573  		kmsg->ikm_ppriority = _pthread_priority_make_from_thread_qos(qos, relpri, 0);
2574  		kmsg->ikm_qos_override = MAX(qos, ovr);
2575  	} else {
2576  		kr = ipc_get_pthpriority_from_kmsg_voucher(kmsg, &kmsg->ikm_ppriority);
2577  		if (kr != KERN_SUCCESS) {
2578  			if (options & MACH_SEND_PROPAGATE_QOS) {
2579  				kmsg->ikm_ppriority = ipc_get_current_thread_priority();
2580  			} else {
2581  				kmsg->ikm_ppriority = MACH_MSG_PRIORITY_UNSPECIFIED;
2582  			}
2583  		}
2584  
2585  		if (options & MACH_SEND_OVERRIDE) {
2586  			mach_msg_qos_t qos = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
2587  			mach_msg_qos_t ovr = _pthread_priority_thread_qos(priority);
2588  			kmsg->ikm_qos_override = MAX(qos, ovr);
2589  		} else {
2590  			kmsg->ikm_qos_override = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
2591  		}
2592  	}
2593  
2594  	kr = KERN_SUCCESS;
2595  
2596  	if (IP_VALID(special_reply_port) &&
2597  	    MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
2598  		if ((options & MACH_SEND_SYNC_OVERRIDE)) {
2599  			boolean_t sync_bootstrap_checkin = !!(options & MACH_SEND_SYNC_BOOTSTRAP_CHECKIN);
2600  			/*
2601  			 * Link the destination port to special reply port and make sure that
2602  			 * dest port has a send turnstile, else allocate one.
2603  			 */
2604  			ipc_port_link_special_reply_port(special_reply_port, dest_port, sync_bootstrap_checkin);
2605  		}
2606  	}
2607  	return kr;
2608  }
2609  
2610  /*
2611   *	Routine:	ipc_kmsg_link_reply_context_locked
2612   *	Purpose:
2613   *		Link any required context from the sending voucher
2614   *		to the reply port. The ipc_kmsg_copyin function will
2615   *		enforce that the sender calls mach_msg in this context.
2616   *	Conditions:
2617   *		reply port is locked
2618   */
2619  static void
2620  ipc_kmsg_link_reply_context_locked(
2621  	ipc_port_t reply_port,
2622  	ipc_port_t voucher_port)
2623  {
2624  	kern_return_t __assert_only kr;
2625  	uint32_t persona_id = 0;
2626  	ipc_voucher_t voucher;
2627  
2628  	ip_lock_held(reply_port);
2629  
2630  	if (!ip_active(reply_port)) {
2631  		return;
2632  	}
2633  
2634  	voucher = convert_port_to_voucher(voucher_port);
2635  
2636  	kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
2637  	assert(kr == KERN_SUCCESS);
2638  	ipc_voucher_release(voucher);
2639  
2640  	if (persona_id == 0 || persona_id == PERSONA_ID_NONE) {
2641  		/* there was no persona context to record */
2642  		return;
2643  	}
2644  
2645  	/*
2646  	 * Set the persona_id as the context on the reply port.
2647  	 * This will force the thread that replies to have adopted a voucher
2648  	 * with a matching persona.
2649  	 */
2650  	reply_port->ip_reply_context = persona_id;
2651  
2652  	return;
2653  }
2654  
2655  static kern_return_t
2656  ipc_kmsg_validate_reply_port_locked(ipc_port_t reply_port, mach_msg_option_t options)
2657  {
2658  	ip_lock_held(reply_port);
2659  
2660  	if (!ip_active(reply_port)) {
2661  		/*
2662  		 * Ideally, we would enforce that the reply receive right is
2663  		 * active, but asynchronous XPC cancellation destroys the
2664  		 * receive right, so we just have to return success here.
2665  		 */
2666  		return KERN_SUCCESS;
2667  	}
2668  
2669  	if (options & MACH_SEND_MSG) {
2670  		/*
2671  		 * If the rely port is active, then it should not be
2672  		 * in-transit, and the receive right should be in the caller's
2673  		 * IPC space.
2674  		 */
2675  		if (!reply_port->ip_receiver_name || reply_port->ip_receiver != current_task()->itk_space) {
2676  			return KERN_INVALID_CAPABILITY;
2677  		}
2678  
2679  		/*
2680  		 * A port used as a reply port in an RPC should have exactly 1
2681  		 * extant send-once right which we either just made or are
2682  		 * moving as part of the IPC.
2683  		 */
2684  		if (reply_port->ip_sorights != 1) {
2685  			return KERN_INVALID_CAPABILITY;
2686  		}
2687  		/*
2688  		 * XPC uses an extra send-right to keep the name of the reply
2689  		 * right around through cancellation.  That makes it harder to
2690  		 * enforce a particular semantic kere, so for now, we say that
2691  		 * you can have a maximum of 1 send right (in addition to your
2692  		 * send once right). In the future, it would be great to lock
2693  		 * this down even further.
2694  		 */
2695  		if (reply_port->ip_srights > 1) {
2696  			return KERN_INVALID_CAPABILITY;
2697  		}
2698  
2699  		/*
2700  		 * The sender can also specify that the receive right should
2701  		 * be immovable. Note that this check only applies to
2702  		 * send-only operations. Combined send/receive or rcv-only
2703  		 * operations can specify an immovable receive right by
2704  		 * opt-ing into guarded descriptors (MACH_RCV_GUARDED_DESC)
2705  		 * and using the MACH_MSG_STRICT_REPLY options flag.
2706  		 */
2707  		if (MACH_SEND_REPLY_IS_IMMOVABLE(options)) {
2708  			if (!reply_port->ip_immovable_receive) {
2709  				return KERN_INVALID_CAPABILITY;
2710  			}
2711  		}
2712  	}
2713  
2714  	/*
2715  	 * don't enforce this yet: need a better way of indicating the
2716  	 * receiver wants this...
2717  	 */
2718  #if 0
2719  	if (MACH_RCV_WITH_IMMOVABLE_REPLY(options)) {
2720  		if (!reply_port->ip_immovable_receive) {
2721  			return KERN_INVALID_CAPABILITY;
2722  		}
2723  	}
2724  #endif /* 0  */
2725  
2726  	return KERN_SUCCESS;
2727  }
2728  
2729  /*
2730   *	Routine:	ipc_kmsg_validate_reply_context_locked
2731   *	Purpose:
2732   *		Validate that the current thread is running in the context
2733   *		required by the destination port.
2734   *	Conditions:
2735   *		dest_port is locked
2736   *	Returns:
2737   *		MACH_MSG_SUCCESS on success.
2738   *		On error, an EXC_GUARD exception is also raised.
2739   *		This function *always* resets the port reply context.
2740   */
2741  static mach_msg_return_t
2742  ipc_kmsg_validate_reply_context_locked(
2743  	mach_msg_option_t option,
2744  	ipc_port_t dest_port,
2745  	ipc_voucher_t voucher,
2746  	mach_port_name_t voucher_name)
2747  {
2748  	uint32_t dest_ctx = dest_port->ip_reply_context;
2749  	dest_port->ip_reply_context = 0;
2750  
2751  	if (!ip_active(dest_port)) {
2752  		return MACH_MSG_SUCCESS;
2753  	}
2754  
2755  	if (voucher == IPC_VOUCHER_NULL || !MACH_PORT_VALID(voucher_name)) {
2756  		if ((option & MACH_SEND_KERNEL) == 0) {
2757  			mach_port_guard_exception(voucher_name, 0,
2758  			    (MPG_FLAGS_STRICT_REPLY_INVALID_VOUCHER | dest_ctx),
2759  			    kGUARD_EXC_STRICT_REPLY);
2760  		}
2761  		return MACH_SEND_INVALID_CONTEXT;
2762  	}
2763  
2764  	kern_return_t __assert_only kr;
2765  	uint32_t persona_id = 0;
2766  	kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
2767  	assert(kr == KERN_SUCCESS);
2768  
2769  	if (dest_ctx != persona_id) {
2770  		if ((option & MACH_SEND_KERNEL) == 0) {
2771  			mach_port_guard_exception(voucher_name, 0,
2772  			    (MPG_FLAGS_STRICT_REPLY_MISMATCHED_PERSONA | ((((uint64_t)persona_id << 32) & MPG_FLAGS_STRICT_REPLY_MASK) | dest_ctx)),
2773  			    kGUARD_EXC_STRICT_REPLY);
2774  		}
2775  		return MACH_SEND_INVALID_CONTEXT;
2776  	}
2777  
2778  	return MACH_MSG_SUCCESS;
2779  }
2780  
2781  /*
2782   *	Routine:	ipc_kmsg_copyin_header
2783   *	Purpose:
2784   *		"Copy-in" port rights in the header of a message.
2785   *		Operates atomically; if it doesn't succeed the
2786   *		message header and the space are left untouched.
2787   *		If it does succeed the remote/local port fields
2788   *		contain object pointers instead of port names,
2789   *		and the bits field is updated.  The destination port
2790   *		will be a valid port pointer.
2791   *
2792   *	Conditions:
2793   *		Nothing locked.
2794   *	Returns:
2795   *		MACH_MSG_SUCCESS	Successful copyin.
2796   *		MACH_SEND_INVALID_HEADER
2797   *			Illegal value in the message header bits.
2798   *		MACH_SEND_INVALID_DEST	The space is dead.
2799   *		MACH_SEND_INVALID_DEST	Can't copyin destination port.
2800   *			(Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
2801   *		MACH_SEND_INVALID_REPLY	Can't copyin reply port.
2802   *			(Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
2803   */
2804  
2805  mach_msg_return_t
2806  ipc_kmsg_copyin_header(
2807  	ipc_kmsg_t              kmsg,
2808  	ipc_space_t             space,
2809  	mach_msg_priority_t     priority,
2810  	mach_msg_option_t       *optionp)
2811  {
2812  	mach_msg_header_t *msg = kmsg->ikm_header;
2813  	mach_msg_bits_t mbits = msg->msgh_bits & MACH_MSGH_BITS_USER;
2814  	mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(msg->msgh_remote_port);
2815  	mach_port_name_t reply_name = CAST_MACH_PORT_TO_NAME(msg->msgh_local_port);
2816  	mach_port_name_t voucher_name = MACH_PORT_NULL;
2817  	kern_return_t kr;
2818  
2819  	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
2820  	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
2821  	mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
2822  	ipc_object_t dest_port = IO_NULL;
2823  	ipc_object_t reply_port = IO_NULL;
2824  	ipc_port_t dest_soright = IP_NULL;
2825  	ipc_port_t reply_soright = IP_NULL;
2826  	ipc_port_t voucher_soright = IP_NULL;
2827  	ipc_port_t release_port = IP_NULL;
2828  	ipc_port_t voucher_port = IP_NULL;
2829  	ipc_port_t voucher_release_port = IP_NULL;
2830  	ipc_entry_t dest_entry = IE_NULL;
2831  	ipc_entry_t reply_entry = IE_NULL;
2832  	ipc_entry_t voucher_entry = IE_NULL;
2833  
2834  	int assertcnt = 0;
2835  #if IMPORTANCE_INHERITANCE
2836  	boolean_t needboost = FALSE;
2837  #endif /* IMPORTANCE_INHERITANCE */
2838  
2839  	if ((mbits != msg->msgh_bits) ||
2840  	    (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type)) ||
2841  	    ((reply_type == 0) ?
2842  	    (reply_name != MACH_PORT_NULL) :
2843  	    !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))) {
2844  		return MACH_SEND_INVALID_HEADER;
2845  	}
2846  
2847  	if (!MACH_PORT_VALID(dest_name)) {
2848  		return MACH_SEND_INVALID_DEST;
2849  	}
2850  
2851  	is_write_lock(space);
2852  	if (!is_active(space)) {
2853  		is_write_unlock(space);
2854  		return MACH_SEND_INVALID_DEST;
2855  	}
2856  	/* space locked and active */
2857  
2858  	/*
2859  	 *	If there is a voucher specified, make sure the disposition is
2860  	 *	valid and the entry actually refers to a voucher port.  Don't
2861  	 *	actually copy in until we validate destination and reply.
2862  	 */
2863  	if (voucher_type != MACH_MSGH_BITS_ZERO) {
2864  		voucher_name = msg->msgh_voucher_port;
2865  
2866  		if (voucher_name == MACH_PORT_DEAD ||
2867  		    (voucher_type != MACH_MSG_TYPE_MOVE_SEND &&
2868  		    voucher_type != MACH_MSG_TYPE_COPY_SEND)) {
2869  			is_write_unlock(space);
2870  			if ((*optionp & MACH_SEND_KERNEL) == 0) {
2871  				mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
2872  			}
2873  			return MACH_SEND_INVALID_VOUCHER;
2874  		}
2875  
2876  		if (voucher_name != MACH_PORT_NULL) {
2877  			voucher_entry = ipc_entry_lookup(space, voucher_name);
2878  			if (voucher_entry == IE_NULL ||
2879  			    (voucher_entry->ie_bits & MACH_PORT_TYPE_SEND) == 0 ||
2880  			    io_kotype(voucher_entry->ie_object) != IKOT_VOUCHER) {
2881  				is_write_unlock(space);
2882  				if ((*optionp & MACH_SEND_KERNEL) == 0) {
2883  					mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
2884  				}
2885  				return MACH_SEND_INVALID_VOUCHER;
2886  			}
2887  		} else {
2888  			voucher_type = MACH_MSG_TYPE_MOVE_SEND;
2889  		}
2890  	}
2891  
2892  	if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) &&
2893  	    (!MACH_PORT_VALID(reply_name) ||
2894  	    ((reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE) && (reply_type != MACH_MSG_TYPE_MOVE_SEND_ONCE))
2895  	    )) {
2896  		/*
2897  		 * The caller cannot enforce a reply context with an invalid
2898  		 * reply port name, or a non-send_once reply disposition.
2899  		 */
2900  		is_write_unlock(space);
2901  		if ((*optionp & MACH_SEND_KERNEL) == 0) {
2902  			mach_port_guard_exception(reply_name, 0,
2903  			    (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_DISP | reply_type),
2904  			    kGUARD_EXC_STRICT_REPLY);
2905  		}
2906  		return MACH_SEND_INVALID_REPLY;
2907  	}
2908  
2909  	/*
2910  	 *	Handle combinations of validating destination and reply; along
2911  	 *	with copying in destination, reply, and voucher in an atomic way.
2912  	 */
2913  
2914  	if (dest_name == voucher_name) {
2915  		/*
2916  		 *	If the destination name is the same as the voucher name,
2917  		 *	the voucher_entry must already be known.  Either that or
2918  		 *	the destination name is MACH_PORT_NULL (i.e. invalid).
2919  		 */
2920  		dest_entry = voucher_entry;
2921  		if (dest_entry == IE_NULL) {
2922  			goto invalid_dest;
2923  		}
2924  
2925  		/*
2926  		 *	Make sure a future copyin of the reply port will succeed.
2927  		 *	Once we start copying in the dest/voucher pair, we can't
2928  		 *	back out.
2929  		 */
2930  		if (MACH_PORT_VALID(reply_name)) {
2931  			assert(reply_type != 0); /* because reply_name not null */
2932  
2933  			/* It is just WRONG if dest, voucher, and reply are all the same. */
2934  			if (voucher_name == reply_name) {
2935  				goto invalid_reply;
2936  			}
2937  			reply_entry = ipc_entry_lookup(space, reply_name);
2938  			if (reply_entry == IE_NULL) {
2939  				goto invalid_reply;
2940  			}
2941  			assert(dest_entry != reply_entry); /* names are not equal */
2942  			if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
2943  				goto invalid_reply;
2944  			}
2945  		}
2946  
2947  		/*
2948  		 *	Do the joint copyin of the dest disposition and
2949  		 *	voucher disposition from the one entry/port.  We
2950  		 *	already validated that the voucher copyin would
2951  		 *	succeed (above).  So, any failure in combining
2952  		 *	the copyins can be blamed on the destination.
2953  		 */
2954  		kr = ipc_right_copyin_two(space, dest_name, dest_entry,
2955  		    dest_type, voucher_type, &dest_port, &dest_soright,
2956  		    &release_port);
2957  		if (kr != KERN_SUCCESS) {
2958  			assert(kr != KERN_INVALID_CAPABILITY);
2959  			goto invalid_dest;
2960  		}
2961  		voucher_port = ip_object_to_port(dest_port);
2962  
2963  		/*
2964  		 * could not have been one of these dispositions,
2965  		 * validated the port was a true kernel voucher port above,
2966  		 * AND was successfully able to copyin both dest and voucher.
2967  		 */
2968  		assert(dest_type != MACH_MSG_TYPE_MAKE_SEND);
2969  		assert(dest_type != MACH_MSG_TYPE_MAKE_SEND_ONCE);
2970  		assert(dest_type != MACH_MSG_TYPE_MOVE_SEND_ONCE);
2971  
2972  		/*
2973  		 *	Perform the delayed reply right copyin (guaranteed success).
2974  		 */
2975  		if (reply_entry != IE_NULL) {
2976  			kr = ipc_right_copyin(space, reply_name, reply_entry,
2977  			    reply_type, IPC_OBJECT_COPYIN_FLAGS_DEADOK,
2978  			    &reply_port, &reply_soright,
2979  			    &release_port, &assertcnt, 0, NULL);
2980  			assert(assertcnt == 0);
2981  			assert(kr == KERN_SUCCESS);
2982  		}
2983  	} else {
2984  		if (dest_name == reply_name) {
2985  			/*
2986  			 *	Destination and reply ports are the same!
2987  			 *	This is very similar to the case where the
2988  			 *	destination and voucher ports were the same
2989  			 *	(except the reply port disposition is not
2990  			 *	previously validated).
2991  			 */
2992  			dest_entry = ipc_entry_lookup(space, dest_name);
2993  			if (dest_entry == IE_NULL) {
2994  				goto invalid_dest;
2995  			}
2996  
2997  			reply_entry = dest_entry;
2998  			assert(reply_type != 0); /* because name not null */
2999  
3000  			/*
3001  			 *	Pre-validate that the reply right can be copied in by itself
3002  			 */
3003  			if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
3004  				goto invalid_reply;
3005  			}
3006  
3007  			/*
3008  			 *	Do the joint copyin of the dest disposition and
3009  			 *	reply disposition from the one entry/port.
3010  			 */
3011  			kr = ipc_right_copyin_two(space, dest_name, dest_entry,
3012  			    dest_type, reply_type, &dest_port, &dest_soright,
3013  			    &release_port);
3014  			if (kr == KERN_INVALID_CAPABILITY) {
3015  				goto invalid_reply;
3016  			} else if (kr != KERN_SUCCESS) {
3017  				goto invalid_dest;
3018  			}
3019  			reply_port = dest_port;
3020  		} else {
3021  			/*
3022  			 *	Handle destination and reply independently, as
3023  			 *	they are independent entries (even if the entries
3024  			 *	refer to the same port).
3025  			 *
3026  			 *	This can be the tough case to make atomic.
3027  			 *
3028  			 *	The difficult problem is serializing with port death.
3029  			 *	The bad case is when dest_port dies after its copyin,
3030  			 *	reply_port dies before its copyin, and dest_port dies before
3031  			 *	reply_port.  Then the copyins operated as if dest_port was
3032  			 *	alive and reply_port was dead, which shouldn't have happened
3033  			 *	because they died in the other order.
3034  			 *
3035  			 *	Note that it is easy for a user task to tell if
3036  			 *	a copyin happened before or after a port died.
3037  			 *	If a port dies before copyin, a dead-name notification
3038  			 *	is generated and the dead name's urefs are incremented,
3039  			 *	and if the copyin happens first, a port-deleted
3040  			 *	notification is generated.
3041  			 *
3042  			 *	Even so, avoiding that potentially detectable race is too
3043  			 *	expensive - and no known code cares about it.  So, we just
3044  			 *	do the expedient thing and copy them in one after the other.
3045  			 */
3046  
3047  			dest_entry = ipc_entry_lookup(space, dest_name);
3048  			if (dest_entry == IE_NULL) {
3049  				goto invalid_dest;
3050  			}
3051  			assert(dest_entry != voucher_entry);
3052  
3053  			/*
3054  			 *	Make sure reply port entry is valid before dest copyin.
3055  			 */
3056  			if (MACH_PORT_VALID(reply_name)) {
3057  				if (reply_name == voucher_name) {
3058  					goto invalid_reply;
3059  				}
3060  				reply_entry = ipc_entry_lookup(space, reply_name);
3061  				if (reply_entry == IE_NULL) {
3062  					goto invalid_reply;
3063  				}
3064  				assert(dest_entry != reply_entry); /* names are not equal */
3065  				assert(reply_type != 0); /* because reply_name not null */
3066  
3067  				if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
3068  					goto invalid_reply;
3069  				}
3070  			}
3071  
3072  			/*
3073  			 *	copyin the destination.
3074  			 */
3075  			kr = ipc_right_copyin(space, dest_name, dest_entry,
3076  			    dest_type, (IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND |
3077  			    IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE),
3078  			    &dest_port, &dest_soright,
3079  			    &release_port, &assertcnt, 0, NULL);
3080  			assert(assertcnt == 0);
3081  			if (kr != KERN_SUCCESS) {
3082  				goto invalid_dest;
3083  			}
3084  			assert(IO_VALID(dest_port));
3085  			assert(!IP_VALID(release_port));
3086  
3087  			/*
3088  			 *	Copyin the pre-validated reply right.
3089  			 *	It's OK if the reply right has gone dead in the meantime.
3090  			 */
3091  			if (MACH_PORT_VALID(reply_name)) {
3092  				kr = ipc_right_copyin(space, reply_name, reply_entry,
3093  				    reply_type, IPC_OBJECT_COPYIN_FLAGS_DEADOK,
3094  				    &reply_port, &reply_soright,
3095  				    &release_port, &assertcnt, 0, NULL);
3096  				assert(assertcnt == 0);
3097  				assert(kr == KERN_SUCCESS);
3098  			} else {
3099  				/* convert invalid name to equivalent ipc_object type */
3100  				reply_port = ip_to_object(CAST_MACH_NAME_TO_PORT(reply_name));
3101  			}
3102  		}
3103  
3104  		/*
3105  		 * Finally can copyin the voucher right now that dest and reply
3106  		 * are fully copied in (guaranteed success).
3107  		 */
3108  		if (IE_NULL != voucher_entry) {
3109  			kr = ipc_right_copyin(space, voucher_name, voucher_entry,
3110  			    voucher_type, IPC_OBJECT_COPYIN_FLAGS_NONE,
3111  			    (ipc_object_t *)&voucher_port,
3112  			    &voucher_soright,
3113  			    &voucher_release_port,
3114  			    &assertcnt, 0, NULL);
3115  			assert(assertcnt == 0);
3116  			assert(KERN_SUCCESS == kr);
3117  			assert(IP_VALID(voucher_port));
3118  			require_ip_active(voucher_port);
3119  		}
3120  	}
3121  
3122  	/*
3123  	 * The entries might need to be deallocated.
3124  	 *
3125  	 * Each entry should be deallocated only once,
3126  	 * even if it was specified in more than one slot in the header.
3127  	 * Note that dest can be the same entry as reply or voucher,
3128  	 * but reply and voucher must be distinct entries.
3129  	 */
3130  	assert(IE_NULL != dest_entry);
3131  	if (IE_NULL != reply_entry) {
3132  		assert(reply_entry != voucher_entry);
3133  	}
3134  
3135  	if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
3136  		ipc_entry_dealloc(space, dest_name, dest_entry);
3137  
3138  		if (dest_entry == reply_entry) {
3139  			reply_entry = IE_NULL;
3140  		}
3141  
3142  		if (dest_entry == voucher_entry) {
3143  			voucher_entry = IE_NULL;
3144  		}
3145  
3146  		dest_entry = IE_NULL;
3147  	}
3148  	if (IE_NULL != reply_entry &&
3149  	    IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
3150  		ipc_entry_dealloc(space, reply_name, reply_entry);
3151  		reply_entry = IE_NULL;
3152  	}
3153  	if (IE_NULL != voucher_entry &&
3154  	    IE_BITS_TYPE(voucher_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
3155  		ipc_entry_dealloc(space, voucher_name, voucher_entry);
3156  		voucher_entry = IE_NULL;
3157  	}
3158  
3159  	dest_type = ipc_object_copyin_type(dest_type);
3160  	reply_type = ipc_object_copyin_type(reply_type);
3161  
3162  	/*
3163  	 *	If the dest port is a kobject AND its receive right belongs to kernel, allow
3164  	 *  copyin of immovable send rights in the message body (port descriptor) to
3165  	 *  succeed since those send rights are simply "moved" or "copied" into kernel.
3166  	 *
3167  	 *  See: ipc_object_copyin().
3168  	 */
3169  	if (io_is_kobject(dest_port) &&
3170  	    ip_object_to_port(dest_port)->ip_receiver == ipc_space_kernel) {
3171  		assert(io_kotype(dest_port) != IKOT_HOST_NOTIFY && io_kotype(dest_port) != IKOT_TIMER);
3172  		kmsg->ikm_flags |= IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
3173  	}
3174  
3175  	/*
3176  	 * JMM - Without rdar://problem/6275821, this is the last place we can
3177  	 * re-arm the send-possible notifications.  It may trigger unexpectedly
3178  	 * early (send may NOT have failed), but better than missing.  We assure
3179  	 * we won't miss by forcing MACH_SEND_ALWAYS if we got past arming.
3180  	 */
3181  	if (((*optionp & MACH_SEND_NOTIFY) != 0) &&
3182  	    dest_type != MACH_MSG_TYPE_PORT_SEND_ONCE &&
3183  	    dest_entry != IE_NULL && dest_entry->ie_request != IE_REQ_NONE) {
3184  		ipc_port_t dport = ip_object_to_port(dest_port);
3185  
3186  		assert(dport != IP_NULL);
3187  		ip_lock(dport);
3188  		if (ip_active(dport) && dport->ip_receiver != ipc_space_kernel) {
3189  			if (ip_full(dport)) {
3190  #if IMPORTANCE_INHERITANCE
3191  				needboost = ipc_port_request_sparm(dport, dest_name,
3192  				    dest_entry->ie_request,
3193  				    *optionp,
3194  				    priority);
3195  				if (needboost == FALSE) {
3196  					ip_unlock(dport);
3197  				}
3198  #else
3199  				ipc_port_request_sparm(dport, dest_name,
3200  				    dest_entry->ie_request,
3201  				    *optionp,
3202  				    priority);
3203  				ip_unlock(dport);
3204  #endif /* IMPORTANCE_INHERITANCE */
3205  			} else {
3206  				*optionp |= MACH_SEND_ALWAYS;
3207  				ip_unlock(dport);
3208  			}
3209  		} else {
3210  			ip_unlock(dport);
3211  		}
3212  	}
3213  
3214  	is_write_unlock(space);
3215  
3216  #if IMPORTANCE_INHERITANCE
3217  	/*
3218  	 * If our request is the first boosting send-possible
3219  	 * notification this cycle, push the boost down the
3220  	 * destination port.
3221  	 */
3222  	if (needboost == TRUE) {
3223  		ipc_port_t dport = ip_object_to_port(dest_port);
3224  
3225  		/* dport still locked from above */
3226  		if (ipc_port_importance_delta(dport, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
3227  			ip_unlock(dport);
3228  		}
3229  	}
3230  #endif /* IMPORTANCE_INHERITANCE */
3231  
3232  	if (dest_soright != IP_NULL) {
3233  		ipc_notify_port_deleted(dest_soright, dest_name);
3234  	}
3235  	if (reply_soright != IP_NULL) {
3236  		ipc_notify_port_deleted(reply_soright, reply_name);
3237  	}
3238  	if (voucher_soright != IP_NULL) {
3239  		ipc_notify_port_deleted(voucher_soright, voucher_name);
3240  	}
3241  
3242  	/*
3243  	 * No room to store voucher port in in-kernel msg header,
3244  	 * so we store it back in the kmsg itself.  Extract the
3245  	 * qos, and apply any override before we enqueue the kmsg.
3246  	 */
3247  	if (IP_VALID(voucher_port)) {
3248  		kmsg->ikm_voucher = voucher_port;
3249  		voucher_type = MACH_MSG_TYPE_MOVE_SEND;
3250  	}
3251  
3252  	msg->msgh_bits = MACH_MSGH_BITS_SET(dest_type, reply_type, voucher_type, mbits);
3253  	msg->msgh_remote_port = ip_object_to_port(dest_port);
3254  	msg->msgh_local_port = ip_object_to_port(reply_port);
3255  
3256  	/* capture the qos value(s) for the kmsg */
3257  	ipc_kmsg_set_qos(kmsg, *optionp, priority);
3258  
3259  	if (release_port != IP_NULL) {
3260  		ip_release(release_port);
3261  	}
3262  
3263  	if (voucher_release_port != IP_NULL) {
3264  		ip_release(voucher_release_port);
3265  	}
3266  
3267  	if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) && IP_VALID(msg->msgh_local_port)) {
3268  		/*
3269  		 * We've already validated that the reply disposition is a
3270  		 * [make/move] send-once. Ideally, we should enforce that the
3271  		 * reply port is also not dead, but XPC asynchronous
3272  		 * cancellation can make the reply port dead before we
3273  		 * actually make it to the mach_msg send.
3274  		 *
3275  		 * Here, we ensure that if we have a non-dead reply port, then
3276  		 * the reply port's receive right should not be in-transit,
3277  		 * and should live in the caller's IPC space.
3278  		 */
3279  		ipc_port_t rport = msg->msgh_local_port;
3280  		ip_lock(rport);
3281  		kr = ipc_kmsg_validate_reply_port_locked(rport, *optionp);
3282  		ip_unlock(rport);
3283  		if (kr != KERN_SUCCESS) {
3284  			/*
3285  			 * no descriptors have been copied in yet, but the
3286  			 * full header has been copied in: clean it up
3287  			 */
3288  			ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
3289  			if ((*optionp & MACH_SEND_KERNEL) == 0) {
3290  				mach_port_guard_exception(reply_name, 0,
3291  				    (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_PORT | kr),
3292  				    kGUARD_EXC_STRICT_REPLY);
3293  			}
3294  			return MACH_SEND_INVALID_REPLY;
3295  		}
3296  	}
3297  
3298  	return MACH_MSG_SUCCESS;
3299  
3300  invalid_reply:
3301  	is_write_unlock(space);
3302  
3303  	if (release_port != IP_NULL) {
3304  		ip_release(release_port);
3305  	}
3306  
3307  	assert(voucher_port == IP_NULL);
3308  	assert(voucher_soright == IP_NULL);
3309  
3310  	if ((*optionp & MACH_SEND_KERNEL) == 0) {
3311  		mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_SEND_INVALID_REPLY);
3312  	}
3313  	return MACH_SEND_INVALID_REPLY;
3314  
3315  invalid_dest:
3316  	is_write_unlock(space);
3317  
3318  	if (release_port != IP_NULL) {
3319  		ip_release(release_port);
3320  	}
3321  
3322  	if (reply_soright != IP_NULL) {
3323  		ipc_notify_port_deleted(reply_soright, reply_name);
3324  	}
3325  
3326  	assert(voucher_port == IP_NULL);
3327  	assert(voucher_soright == IP_NULL);
3328  
3329  	return MACH_SEND_INVALID_DEST;
3330  }
3331  
3332  static mach_msg_descriptor_t *
3333  ipc_kmsg_copyin_port_descriptor(
3334  	mach_msg_port_descriptor_t *dsc,
3335  	mach_msg_legacy_port_descriptor_t *user_dsc_in,
3336  	ipc_space_t space,
3337  	ipc_object_t dest,
3338  	ipc_kmsg_t kmsg,
3339  	mach_msg_option_t *optionp,
3340  	mach_msg_return_t *mr)
3341  {
3342  	mach_msg_legacy_port_descriptor_t user_dsc = *user_dsc_in;
3343  	mach_msg_type_name_t        user_disp;
3344  	mach_msg_type_name_t        result_disp;
3345  	mach_port_name_t            name;
3346  	ipc_object_t                        object;
3347  
3348  	user_disp = user_dsc.disposition;
3349  	result_disp = ipc_object_copyin_type(user_disp);
3350  
3351  	name = (mach_port_name_t)user_dsc.name;
3352  	if (MACH_PORT_VALID(name)) {
3353  		kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);
3354  		if (kr != KERN_SUCCESS) {
3355  			if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
3356  				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
3357  			}
3358  			*mr = MACH_SEND_INVALID_RIGHT;
3359  			return NULL;
3360  		}
3361  
3362  		if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
3363  		    ipc_port_check_circularity(ip_object_to_port(object),
3364  		    ip_object_to_port(dest))) {
3365  			kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
3366  		}
3367  		dsc->name = ip_object_to_port(object);
3368  	} else {
3369  		dsc->name = CAST_MACH_NAME_TO_PORT(name);
3370  	}
3371  	dsc->disposition = result_disp;
3372  	dsc->type = MACH_MSG_PORT_DESCRIPTOR;
3373  
3374  	dsc->pad_end = 0;         // debug, unnecessary
3375  
3376  	return (mach_msg_descriptor_t *)(user_dsc_in + 1);
3377  }
3378  
/*
 *	Routine:	ipc_kmsg_copyin_ool_descriptor
 *	Purpose:
 *		Copy in one out-of-line memory descriptor: unpack the
 *		sender's address/size/options (32- or 64-bit user layout
 *		selected by is_64bit) and convert the described user
 *		region into a vm_map_copy_t held by the kernel descriptor.
 *	Arguments:
 *		dsc		kernel-internal descriptor to fill in.
 *		user_dsc	current user descriptor; return value points
 *				just past it.
 *		paddr		cursor into a preallocated buffer in
 *				ipc_kernel_copy_map used for large physical
 *				copies; advanced past the pages consumed.
 *		copy		out: the vm_map_copy_t created.
 *		space_needed	remaining reserved copy-map space; reduced
 *				by the pages consumed here.
 *		map		sender's VM map.
 *	Returns:
 *		Pointer to the next user descriptor, or NULL on failure
 *		with *mr set to MACH_SEND_INVALID_MEMORY or
 *		MACH_MSG_VM_KERNEL.
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_descriptor(
	mach_msg_ool_descriptor_t *dsc,
	mach_msg_descriptor_t *user_dsc,
	int is_64bit,
	vm_offset_t *paddr,
	vm_map_copy_t *copy,
	vm_size_t *space_needed,
	vm_map_t map,
	__unused mach_msg_option_t *optionp,
	mach_msg_return_t *mr)
{
	vm_size_t                           length;
	boolean_t                           dealloc;
	mach_msg_copy_options_t             copy_options;
	mach_vm_offset_t            addr;
	mach_msg_descriptor_type_t  dsc_type;

	/* Unpack the user descriptor; layout differs by sender ABI. */
	if (is_64bit) {
		mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = (mach_vm_offset_t) user_ool_dsc->address;
		length = user_ool_dsc->size;
		dealloc = user_ool_dsc->deallocate;
		copy_options = user_ool_dsc->copy;
		dsc_type = user_ool_dsc->type;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	} else {
		mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = CAST_USER_ADDR_T(user_ool_dsc->address);
		dealloc = user_ool_dsc->deallocate;
		copy_options = user_ool_dsc->copy;
		dsc_type = user_ool_dsc->type;
		length = user_ool_dsc->size;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	}

	dsc->size = (mach_msg_size_t)length;
	dsc->deallocate = dealloc;
	dsc->copy = copy_options;
	dsc->type = dsc_type;

	if (length == 0) {
		/* nothing to transfer */
		dsc->address = NULL;
	} else if ((length >= MSG_OOL_SIZE_SMALL) &&
	    (copy_options == MACH_MSG_PHYSICAL_COPY) && !dealloc) {
		/*
		 * If the request is a physical copy and the source
		 * is not being deallocated, then allocate space
		 * in the kernel's pageable ipc copy map and copy
		 * the data in.  The semantics guarantee that the
		 * data will have been physically copied before
		 * the send operation terminates.  Thus if the data
		 * is not being deallocated, we must be prepared
		 * to page if the region is sufficiently large.
		 */
		if (copyin(addr, (char *)*paddr, length)) {
			*mr = MACH_SEND_INVALID_MEMORY;
			return NULL;
		}

		/*
		 * The kernel ipc copy map is marked no_zero_fill.
		 * If the transfer is not a page multiple, we need
		 * to zero fill the balance.
		 */
		if (!page_aligned(length)) {
			(void) memset((void *) (*paddr + length), 0,
			    round_page(length) - length);
		}
		if (vm_map_copyin(ipc_kernel_copy_map, (vm_map_address_t)*paddr,
		    (vm_map_size_t)length, TRUE, copy) != KERN_SUCCESS) {
			*mr = MACH_MSG_VM_KERNEL;
			return NULL;
		}
		dsc->address = (void *)*copy;
		/* advance the copy-map cursor and account for the pages used */
		*paddr += round_page(length);
		*space_needed -= round_page(length);
	} else {
		/*
		 * Make a vm_map_copy_t of the data.  If the
		 * data is small, this will do an optimized physical
		 * copy.  Otherwise, it will do a virtual copy.
		 *
		 * NOTE: A virtual copy is OK if the original is being
		 * deallocated, even if a physical copy was requested.
		 */
		kern_return_t kr = vm_map_copyin(map, addr,
		    (vm_map_size_t)length, dealloc, copy);
		if (kr != KERN_SUCCESS) {
			*mr = (kr == KERN_RESOURCE_SHORTAGE) ?
			    MACH_MSG_VM_KERNEL :
			    MACH_SEND_INVALID_MEMORY;
			return NULL;
		}
		dsc->address = (void *)*copy;
	}

	return user_dsc;
}
3482  
/*
 *	Routine:	ipc_kmsg_copyin_ool_ports_descriptor
 *	Purpose:
 *		Copy in one out-of-line ports descriptor: copy the array
 *		of port names out of the sender's address space, then copy
 *		in each named right, leaving a kernel-allocated array of
 *		ipc_object_t pointers in the descriptor.
 *	Returns:
 *		Pointer to the next user descriptor, or NULL on failure
 *		with *mr set (MACH_SEND_INVALID_TYPE, MACH_SEND_TOO_LARGE,
 *		MACH_SEND_NO_BUFFER, MACH_SEND_INVALID_MEMORY, or
 *		MACH_SEND_INVALID_RIGHT).
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_ports_descriptor(
	mach_msg_ool_ports_descriptor_t *dsc,
	mach_msg_descriptor_t *user_dsc,
	int is_64bit,
	vm_map_t map,
	ipc_space_t space,
	ipc_object_t dest,
	ipc_kmsg_t kmsg,
	mach_msg_option_t *optionp,
	mach_msg_return_t *mr)
{
	void *data;
	ipc_object_t *objects;
	unsigned int i;
	mach_vm_offset_t addr;
	mach_msg_type_name_t user_disp;
	mach_msg_type_name_t result_disp;
	mach_msg_type_number_t count;
	mach_msg_copy_options_t copy_option;
	boolean_t deallocate;
	mach_msg_descriptor_type_t type;
	vm_size_t ports_length, names_length;

	/* Unpack the user descriptor; layout differs by sender ABI. */
	if (is_64bit) {
		mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = (mach_vm_offset_t)user_ool_dsc->address;
		count = user_ool_dsc->count;
		deallocate = user_ool_dsc->deallocate;
		copy_option = user_ool_dsc->copy;
		user_disp = user_ool_dsc->disposition;
		type = user_ool_dsc->type;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	} else {
		mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = CAST_USER_ADDR_T(user_ool_dsc->address);
		count = user_ool_dsc->count;
		deallocate = user_ool_dsc->deallocate;
		copy_option = user_ool_dsc->copy;
		user_disp = user_ool_dsc->disposition;
		type = user_ool_dsc->type;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	}

	dsc->deallocate = deallocate;
	dsc->copy = copy_option;
	dsc->type = type;
	dsc->count = count;
	dsc->address = NULL; /* for now */

	result_disp = ipc_object_copyin_type(user_disp);
	dsc->disposition = result_disp;

	/* We always do a 'physical copy', but you have to specify something valid */
	if (copy_option != MACH_MSG_PHYSICAL_COPY &&
	    copy_option != MACH_MSG_VIRTUAL_COPY) {
		*mr = MACH_SEND_INVALID_TYPE;
		return NULL;
	}

	/* calculate length of data in bytes, rounding up */

	/* ports_length: buffer size for count in-kernel port pointers */
	if (os_mul_overflow(count, sizeof(mach_port_t), &ports_length)) {
		*mr = MACH_SEND_TOO_LARGE;
		return NULL;
	}

	/* names_length: size of the user's array of 4-byte port names */
	if (os_mul_overflow(count, sizeof(mach_port_name_t), &names_length)) {
		*mr = MACH_SEND_TOO_LARGE;
		return NULL;
	}

	if (ports_length == 0) {
		/* empty array: dsc->address stays NULL */
		return user_dsc;
	}

	data = kalloc(ports_length);

	if (data == NULL) {
		*mr = MACH_SEND_NO_BUFFER;
		return NULL;
	}

	/*
	 * On LP64, in-kernel ports (8 bytes) are wider than user names
	 * (4 bytes), so the names are staged in the upper half of the
	 * buffer; the expansion loop below reads each name before its
	 * slot is overwritten with the copied-in object.  On 32-bit
	 * kernels the sizes match and the array is converted in place.
	 */
#ifdef __LP64__
	mach_port_name_t *names = &((mach_port_name_t *)data)[count];
#else
	mach_port_name_t *names = ((mach_port_name_t *)data);
#endif

	if (copyinmap(map, addr, names, names_length) != KERN_SUCCESS) {
		kfree(data, ports_length);
		*mr = MACH_SEND_INVALID_MEMORY;
		return NULL;
	}

	if (deallocate) {
		/* best-effort: result intentionally ignored */
		(void) mach_vm_deallocate(map, addr, (mach_vm_size_t)names_length);
	}

	objects = (ipc_object_t *) data;
	dsc->address = data;

	/* Copy in each right, converting names to object pointers in place. */
	for (i = 0; i < count; i++) {
		mach_port_name_t name = names[i];
		ipc_object_t object;

		if (!MACH_PORT_VALID(name)) {
			/* null or dead name: pass it through unchanged */
			objects[i] = ip_to_object(CAST_MACH_NAME_TO_PORT(name));
			continue;
		}

		kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);

		if (kr != KERN_SUCCESS) {
			unsigned int j;

			/* undo the rights already copied in, then fail the send */
			for (j = 0; j < i; j++) {
				object = objects[j];
				if (IPC_OBJECT_VALID(object)) {
					ipc_object_destroy(object, result_disp);
				}
			}
			kfree(data, ports_length);
			dsc->address = NULL;
			if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
			}
			*mr = MACH_SEND_INVALID_RIGHT;
			return NULL;
		}

		/*
		 * Sending a receive right inside a message can create a
		 * circular port reference; flag the message if so.
		 */
		if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
		    ipc_port_check_circularity(ip_object_to_port(object),
		    ip_object_to_port(dest))) {
			kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
		}

		objects[i] = object;
	}

	return user_dsc;
}
3629  
3630  static mach_msg_descriptor_t *
3631  ipc_kmsg_copyin_guarded_port_descriptor(
3632  	mach_msg_guarded_port_descriptor_t *dsc,
3633  	mach_msg_descriptor_t *user_addr,
3634  	int is_64bit,
3635  	ipc_space_t space,
3636  	ipc_object_t dest,
3637  	ipc_kmsg_t kmsg,
3638  	mach_msg_option_t *optionp,
3639  	mach_msg_return_t *mr)
3640  {
3641  	mach_msg_descriptor_t       *user_dsc;
3642  	mach_msg_type_name_t        disp;
3643  	mach_msg_type_name_t        result_disp;
3644  	mach_port_name_t            name;
3645  	mach_msg_guard_flags_t      guard_flags;
3646  	ipc_object_t                object;
3647  	mach_port_context_t         context;
3648  
3649  	if (!is_64bit) {
3650  		mach_msg_guarded_port_descriptor32_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
3651  		name = user_gp_dsc->name;
3652  		guard_flags = user_gp_dsc->flags;
3653  		disp = user_gp_dsc->disposition;
3654  		context = user_gp_dsc->context;
3655  		user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
3656  	} else {
3657  		mach_msg_guarded_port_descriptor64_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
3658  		name = user_gp_dsc->name;
3659  		guard_flags = user_gp_dsc->flags;
3660  		disp = user_gp_dsc->disposition;
3661  		context = user_gp_dsc->context;
3662  		user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
3663  	}
3664  
3665  	guard_flags &= MACH_MSG_GUARD_FLAGS_MASK;
3666  	result_disp = ipc_object_copyin_type(disp);
3667  
3668  	if (MACH_PORT_VALID(name)) {
3669  		kern_return_t kr = ipc_object_copyin(space, name, disp, &object, context, &guard_flags, kmsg->ikm_flags);
3670  		if (kr != KERN_SUCCESS) {
3671  			if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
3672  				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
3673  			}
3674  			*mr = MACH_SEND_INVALID_RIGHT;
3675  			return NULL;
3676  		}
3677  
3678  		if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
3679  		    ipc_port_check_circularity(ip_object_to_port(object),
3680  		    ip_object_to_port(dest))) {
3681  			kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
3682  		}
3683  		dsc->name = ip_object_to_port(object);
3684  	} else {
3685  		dsc->name = CAST_MACH_NAME_TO_PORT(name);
3686  	}
3687  	dsc->flags = guard_flags;
3688  	dsc->disposition = result_disp;
3689  	dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
3690  
3691  #if __LP64__
3692  	dsc->pad_end = 0;         // debug, unnecessary
3693  #endif
3694  
3695  	return user_dsc;
3696  }
3697  
3698  
3699  /*
3700   *	Routine:	ipc_kmsg_copyin_body
3701   *	Purpose:
3702   *		"Copy-in" port rights and out-of-line memory
3703   *		in the message body.
3704   *
3705   *		In all failure cases, the message is left holding
3706   *		no rights or memory.  However, the message buffer
3707   *		is not deallocated.  If successful, the message
3708   *		contains a valid destination port.
3709   *	Conditions:
3710   *		Nothing locked.
3711   *	Returns:
3712   *		MACH_MSG_SUCCESS	Successful copyin.
3713   *		MACH_SEND_INVALID_MEMORY	Can't grab out-of-line memory.
3714   *		MACH_SEND_INVALID_RIGHT	Can't copyin port right in body.
3715   *		MACH_SEND_INVALID_TYPE	Bad type specification.
3716   *		MACH_SEND_MSG_TOO_SMALL	Body is too small for types/data.
3717   *		MACH_SEND_INVALID_RT_OOL_SIZE OOL Buffer too large for RT
3718   *		MACH_MSG_INVALID_RT_DESCRIPTOR Dealloc and RT are incompatible
3719   *		MACH_SEND_NO_GRANT_DEST	Dest port doesn't accept ports in body
3720   */
3721  
3722  mach_msg_return_t
3723  ipc_kmsg_copyin_body(
3724  	ipc_kmsg_t      kmsg,
3725  	ipc_space_t     space,
3726  	vm_map_t    map,
3727  	mach_msg_option_t *optionp)
3728  {
3729  	ipc_object_t                dest;
3730  	mach_msg_body_t             *body;
3731  	mach_msg_descriptor_t       *daddr, *naddr, *end;
3732  	mach_msg_descriptor_t       *user_addr, *kern_addr;
3733  	mach_msg_type_number_t      dsc_count;
3734  	boolean_t                   is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
3735  	boolean_t                   complex = FALSE;
3736  	boolean_t                   contains_port_desc = FALSE;
3737  	vm_size_t                   space_needed = 0;
3738  	vm_offset_t                 paddr = 0;
3739  	vm_map_copy_t               copy = VM_MAP_COPY_NULL;
3740  	mach_msg_type_number_t      i;
3741  	mach_msg_return_t           mr = MACH_MSG_SUCCESS;
3742  	ipc_port_t                  remote_port = kmsg->ikm_header->msgh_remote_port;
3743  
3744  	vm_size_t           descriptor_size = 0;
3745  
3746  	mach_msg_type_number_t total_ool_port_count = 0;
3747  	mach_msg_guard_flags_t guard_flags = 0;
3748  	mach_port_context_t context;
3749  	mach_msg_type_name_t disp;
3750  
3751  	/*
3752  	 * Determine if the target is a kernel port.
3753  	 */
3754  	dest = ip_to_object(remote_port);
3755  	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
3756  	naddr = (mach_msg_descriptor_t *) (body + 1);
3757  	end = (mach_msg_descriptor_t *) ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size);
3758  
3759  	dsc_count = body->msgh_descriptor_count;
3760  	if (dsc_count == 0) {
3761  		return MACH_MSG_SUCCESS;
3762  	}
3763  
3764  	/*
	 * Make an initial pass to determine kernel VM space requirements for
3766  	 * physical copies and possible contraction of the descriptors from
3767  	 * processes with pointers larger than the kernel's.
3768  	 */
3769  	daddr = NULL;
3770  	for (i = 0; i < dsc_count; i++) {
3771  		mach_msg_size_t size;
3772  		mach_msg_type_number_t ool_port_count = 0;
3773  
3774  		daddr = naddr;
3775  
3776  		/* make sure the descriptor fits in the message */
3777  		if (is_task_64bit) {
3778  			if ((mach_msg_descriptor_t*)((vm_offset_t)daddr + 12) > end) {
3779  				mr = MACH_SEND_MSG_TOO_SMALL;
3780  				goto clean_message;
3781  			}
3782  
3783  			switch (daddr->type.type) {
3784  			case MACH_MSG_OOL_DESCRIPTOR:
3785  			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
3786  			case MACH_MSG_OOL_PORTS_DESCRIPTOR:
3787  			case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
3788  				descriptor_size += 16;
3789  				naddr = (typeof(naddr))((vm_offset_t)daddr + 16);
3790  				break;
3791  			default:
3792  				descriptor_size += 12;
3793  				naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
3794  				break;
3795  			}
3796  		} else {
3797  			descriptor_size += 12;
3798  			naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
3799  		}
3800  
3801  		if (naddr > end) {
3802  			mr = MACH_SEND_MSG_TOO_SMALL;
3803  			goto clean_message;
3804  		}
3805  
3806  		switch (daddr->type.type) {
3807  		case MACH_MSG_OOL_DESCRIPTOR:
3808  		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
3809  			size = (is_task_64bit) ?
3810  			    ((mach_msg_ool_descriptor64_t *)daddr)->size :
3811  			    daddr->out_of_line.size;
3812  
3813  			if (daddr->out_of_line.copy != MACH_MSG_PHYSICAL_COPY &&
3814  			    daddr->out_of_line.copy != MACH_MSG_VIRTUAL_COPY) {
3815  				/*
3816  				 * Invalid copy option
3817  				 */
3818  				mr = MACH_SEND_INVALID_TYPE;
3819  				goto clean_message;
3820  			}
3821  
3822  			if ((size >= MSG_OOL_SIZE_SMALL) &&
3823  			    (daddr->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) &&
3824  			    !(daddr->out_of_line.deallocate)) {
3825  				/*
3826  				 * Out-of-line memory descriptor, accumulate kernel
3827  				 * memory requirements
3828  				 */
3829  				if (space_needed + round_page(size) <= space_needed) {
					/* Overflow detected */
3831  					mr = MACH_MSG_VM_KERNEL;
3832  					goto clean_message;
3833  				}
3834  
3835  				space_needed += round_page(size);
3836  				if (space_needed > ipc_kmsg_max_vm_space) {
3837  					/* Per message kernel memory limit exceeded */
3838  					mr = MACH_MSG_VM_KERNEL;
3839  					goto clean_message;
3840  				}
3841  			}
3842  			break;
3843  		case MACH_MSG_PORT_DESCRIPTOR:
3844  			if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) {
3845  				/* Overflow detected */
3846  				mr = MACH_SEND_TOO_LARGE;
3847  				goto clean_message;
3848  			}
3849  			contains_port_desc = TRUE;
3850  			break;
3851  		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
3852  			ool_port_count = (is_task_64bit) ?
3853  			    ((mach_msg_ool_ports_descriptor64_t *)daddr)->count :
3854  			    daddr->ool_ports.count;
3855  
3856  			if (os_add_overflow(total_ool_port_count, ool_port_count, &total_ool_port_count)) {
3857  				/* Overflow detected */
3858  				mr = MACH_SEND_TOO_LARGE;
3859  				goto clean_message;
3860  			}
3861  
3862  			if (ool_port_count > (ipc_kmsg_max_vm_space / sizeof(mach_port_t))) {
3863  				/* Per message kernel memory limit exceeded */
3864  				mr = MACH_SEND_TOO_LARGE;
3865  				goto clean_message;
3866  			}
3867  			contains_port_desc = TRUE;
3868  			break;
3869  		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
3870  			guard_flags = (is_task_64bit) ?
3871  			    ((mach_msg_guarded_port_descriptor64_t *)daddr)->flags :
3872  			    ((mach_msg_guarded_port_descriptor32_t *)daddr)->flags;
3873  			context = (is_task_64bit) ?
3874  			    ((mach_msg_guarded_port_descriptor64_t *)daddr)->context :
3875  			    ((mach_msg_guarded_port_descriptor32_t *)daddr)->context;
3876  			disp = (is_task_64bit) ?
3877  			    ((mach_msg_guarded_port_descriptor64_t *)daddr)->disposition :
3878  			    ((mach_msg_guarded_port_descriptor32_t *)daddr)->disposition;
3879  
3880  			/* Only MACH_MSG_TYPE_MOVE_RECEIVE is supported for now */
3881  			if (!guard_flags || ((guard_flags & ~MACH_MSG_GUARD_FLAGS_MASK) != 0) ||
3882  			    ((guard_flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && (context != 0)) ||
3883  			    (disp != MACH_MSG_TYPE_MOVE_RECEIVE)) {
3884  				/*
3885  				 * Invalid guard flags, context or disposition
3886  				 */
3887  				mr = MACH_SEND_INVALID_TYPE;
3888  				goto clean_message;
3889  			}
3890  			if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) {
3891  				/* Overflow detected */
3892  				mr = MACH_SEND_TOO_LARGE;
3893  				goto clean_message;
3894  			}
3895  			contains_port_desc = TRUE;
3896  			break;
3897  		}
3898  	}
3899  
3900  	/* Sending more than 16383 rights in one message seems crazy */
3901  	if (total_ool_port_count >= (MACH_PORT_UREFS_MAX / 4)) {
3902  		mr = MACH_SEND_TOO_LARGE;
3903  		goto clean_message;
3904  	}
3905  
3906  	/*
3907  	 * Check if dest is a no-grant port; Since this bit is set only on
3908  	 * port construction and cannot be unset later, we can peek at the
3909  	 * bit without paying the cost of locking the port.
3910  	 */
3911  	if (contains_port_desc && remote_port->ip_no_grant) {
3912  		mr = MACH_SEND_NO_GRANT_DEST;
3913  		goto clean_message;
3914  	}
3915  
3916  	/*
3917  	 * Allocate space in the pageable kernel ipc copy map for all the
3918  	 * ool data that is to be physically copied.  Map is marked wait for
3919  	 * space.
3920  	 */
3921  	if (space_needed) {
3922  		if (vm_allocate_kernel(ipc_kernel_copy_map, &paddr, space_needed,
3923  		    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC) != KERN_SUCCESS) {
3924  			mr = MACH_MSG_VM_KERNEL;
3925  			goto clean_message;
3926  		}
3927  	}
3928  
3929  	/* user_addr = just after base as it was copied in */
3930  	user_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));
3931  
3932  	/* Shift the mach_msg_base_t down to make room for dsc_count*16bytes of descriptors on 64 bit kernels
3933  	 */
3934  	if (descriptor_size != 16 * dsc_count) {
3935  		vm_offset_t dsc_adjust = 16 * dsc_count - descriptor_size;
3936  
3937  		memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
3938  		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);
3939  
3940  		/* Update the message size for the larger in-kernel representation */
3941  		kmsg->ikm_header->msgh_size += (mach_msg_size_t)dsc_adjust;
3942  	}
3943  
3944  
3945  	/* kern_addr = just after base after it has been (conditionally) moved */
3946  	kern_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));
3947  
3948  	/* handle the OOL regions and port descriptors. */
3949  	for (i = 0; i < dsc_count; i++) {
3950  		switch (user_addr->type.type) {
3951  		case MACH_MSG_PORT_DESCRIPTOR:
3952  			user_addr = ipc_kmsg_copyin_port_descriptor((mach_msg_port_descriptor_t *)kern_addr,
3953  			    (mach_msg_legacy_port_descriptor_t *)user_addr, space, dest, kmsg, optionp, &mr);
3954  			kern_addr++;
3955  			complex = TRUE;
3956  			break;
3957  		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
3958  		case MACH_MSG_OOL_DESCRIPTOR:
3959  			user_addr = ipc_kmsg_copyin_ool_descriptor((mach_msg_ool_descriptor_t *)kern_addr,
3960  			    user_addr, is_task_64bit, &paddr, &copy, &space_needed, map, optionp, &mr);
3961  			kern_addr++;
3962  			complex = TRUE;
3963  			break;
3964  		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
3965  			user_addr = ipc_kmsg_copyin_ool_ports_descriptor((mach_msg_ool_ports_descriptor_t *)kern_addr,
3966  			    user_addr, is_task_64bit, map, space, dest, kmsg, optionp, &mr);
3967  			kern_addr++;
3968  			complex = TRUE;
3969  			break;
3970  		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
3971  			user_addr = ipc_kmsg_copyin_guarded_port_descriptor((mach_msg_guarded_port_descriptor_t *)kern_addr,
3972  			    user_addr, is_task_64bit, space, dest, kmsg, optionp, &mr);
3973  			kern_addr++;
3974  			complex = TRUE;
3975  			break;
3976  		default:
3977  			/* Invalid descriptor */
3978  			mr = MACH_SEND_INVALID_TYPE;
3979  			break;
3980  		}
3981  
3982  		if (MACH_MSG_SUCCESS != mr) {
3983  			/* clean from start of message descriptors to i */
3984  			ipc_kmsg_clean_partial(kmsg, i,
3985  			    (mach_msg_descriptor_t *)((mach_msg_base_t *)kmsg->ikm_header + 1),
3986  			    paddr, space_needed);
3987  			goto out;
3988  		}
3989  	}         /* End of loop */
3990  
3991  	if (!complex) {
3992  		kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
3993  	}
3994  out:
3995  	return mr;
3996  
3997  clean_message:
3998  	/* no descriptors have been copied in yet */
3999  	ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
4000  	return mr;
4001  }
4002  
4003  
4004  /*
4005   *	Routine:	ipc_kmsg_copyin
4006   *	Purpose:
4007   *		"Copy-in" port rights and out-of-line memory
4008   *		in the message.
4009   *
4010   *		In all failure cases, the message is left holding
4011   *		no rights or memory.  However, the message buffer
4012   *		is not deallocated.  If successful, the message
4013   *		contains a valid destination port.
4014   *	Conditions:
4015   *		Nothing locked.
4016   *	Returns:
4017   *		MACH_MSG_SUCCESS	Successful copyin.
4018   *		MACH_SEND_INVALID_HEADER Illegal value in the message header bits.
4019   *		MACH_SEND_INVALID_DEST	Can't copyin destination port.
4020   *		MACH_SEND_INVALID_REPLY	Can't copyin reply port.
4021   *		MACH_SEND_INVALID_MEMORY	Can't grab out-of-line memory.
4022   *		MACH_SEND_INVALID_RIGHT	Can't copyin port right in body.
4023   *		MACH_SEND_INVALID_TYPE	Bad type specification.
4024   *		MACH_SEND_MSG_TOO_SMALL	Body is too small for types/data.
4025   */
4026  
mach_msg_return_t
ipc_kmsg_copyin(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	vm_map_t                map,
	mach_msg_priority_t     priority,
	mach_msg_option_t       *optionp)
{
	mach_msg_return_t           mr;
	/*
	 * Capture the destination's user-space name now: the header copyin
	 * below replaces the name in msgh_remote_port with a port pointer,
	 * and the name is still needed for the guard exception on filtering.
	 */
	mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);

	/* strip any header bits the sender is not allowed to set */
	kmsg->ikm_header->msgh_bits &= MACH_MSGH_BITS_USER;

	/* copy in the destination/reply/voucher rights named in the header */
	mr = ipc_kmsg_copyin_header(kmsg, space, priority, optionp);

	if (mr != MACH_MSG_SUCCESS) {
		return mr;
	}

	/* Get the message filter policy if the task and port support filtering */
	mach_msg_filter_id fid = 0;
	if (ip_enforce_msg_filtering(kmsg->ikm_header->msgh_remote_port) &&
	    task_get_filter_msg_flag(current_task())) {
		/* port label is yet to be supported */
		boolean_t allow_kmsg = mach_msg_fetch_filter_policy(NULL, kmsg->ikm_header->msgh_id, &fid);
		if (!allow_kmsg) {
			/* deliver a guard exception, then undo the header copyin */
			mach_port_guard_exception(dest_name, 0, 0, kGUARD_EXC_MSG_FILTERED);
			/* no descriptors have been copied in yet */
			ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
			return MACH_SEND_MSG_FILTERED;
		}
		kmsg->ikm_filter_policy_id = fid;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND) | DBG_FUNC_NONE,
	    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
	    (uintptr_t)kmsg->ikm_header->msgh_bits,
	    (uintptr_t)kmsg->ikm_header->msgh_id,
	    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(kmsg->ikm_voucher)),
	    0);

	DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_copyin header:\n%.8x\n%.8x\n%p\n%p\n%p\n%.8x\n",
	    kmsg->ikm_header->msgh_size,
	    kmsg->ikm_header->msgh_bits,
	    kmsg->ikm_header->msgh_remote_port,
	    kmsg->ikm_header->msgh_local_port,
	    kmsg->ikm_voucher,
	    kmsg->ikm_header->msgh_id);

	/* a complex message also carries body descriptors that need copyin */
	if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		mr = ipc_kmsg_copyin_body( kmsg, space, map, optionp);

		/* unreachable if !DEBUG */
		__unreachable_ok_push
		if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
			kprintf("body:\n");
			uint32_t i;
			for (i = 0; i * 4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t)); i++) {
				kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]);
			}
		}
		__unreachable_ok_pop
	}

	/* Sign the message contents */
	if (mr == MACH_MSG_SUCCESS) {
		ikm_sign(kmsg);
	}

	return mr;
}
4098  
4099  /*
4100   *	Routine:	ipc_kmsg_copyin_from_kernel
4101   *	Purpose:
4102   *		"Copy-in" port rights and out-of-line memory
4103   *		in a message sent from the kernel.
4104   *
4105   *		Because the message comes from the kernel,
4106   *		the implementation assumes there are no errors
4107   *		or peculiarities in the message.
4108   *	Conditions:
4109   *		Nothing locked.
4110   */
4111  
mach_msg_return_t
ipc_kmsg_copyin_from_kernel(
	ipc_kmsg_t      kmsg)
{
	mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
	mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
	mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
	ipc_object_t remote = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t local = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_port_t dest = kmsg->ikm_header->msgh_remote_port;

	/* translate the destination and reply ports */
	if (!IO_VALID(remote)) {
		return MACH_SEND_INVALID_DEST;
	}

	ipc_object_copyin_from_kernel(remote, rname);
	if (IO_VALID(local)) {
		ipc_object_copyin_from_kernel(local, lname);
	}

	/*
	 *	The common case is a complex message with no reply port,
	 *	because that is what the memory_object interface uses.
	 */

	if (bits == (MACH_MSGH_BITS_COMPLEX |
	    MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
		/* fast path: rewrite COPY_SEND to the post-copyin PORT_SEND form */
		bits = (MACH_MSGH_BITS_COMPLEX |
		    MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));

		kmsg->ikm_header->msgh_bits = bits;
	} else {
		/* general case: convert both dispositions via ipc_object_copyin_type */
		bits = (MACH_MSGH_BITS_OTHER(bits) |
		    MACH_MSGH_BITS(ipc_object_copyin_type(rname),
		    ipc_object_copyin_type(lname)));

		kmsg->ikm_header->msgh_bits = bits;
	}

	if (bits & MACH_MSGH_BITS_COMPLEX) {
		/*
		 * Check if the remote port accepts ports in the body.
		 */
		if (dest->ip_no_grant) {
			mach_msg_descriptor_t   *saddr;
			mach_msg_body_t         *body;
			mach_msg_type_number_t  i, count;

			body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
			saddr = (mach_msg_descriptor_t *) (body + 1);
			count = body->msgh_descriptor_count;

			/* scan first so we can bail before any rights are copied in */
			for (i = 0; i < count; i++, saddr++) {
				switch (saddr->type.type) {
				case MACH_MSG_PORT_DESCRIPTOR:
				case MACH_MSG_OOL_PORTS_DESCRIPTOR:
				case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
					/* no descriptors have been copied in yet */
					ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
					return MACH_SEND_NO_GRANT_DEST;
				}
			}
		}

		mach_msg_descriptor_t   *saddr;
		mach_msg_body_t         *body;
		mach_msg_type_number_t  i, count;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		saddr = (mach_msg_descriptor_t *) (body + 1);
		count = body->msgh_descriptor_count;

		/*
		 * Walk the descriptors in place, converting dispositions and
		 * copying in the named rights.  The sender is the kernel, so
		 * descriptors are trusted and not size-checked here.
		 */
		for (i = 0; i < count; i++, saddr++) {
			switch (saddr->type.type) {
			case MACH_MSG_PORT_DESCRIPTOR: {
				mach_msg_type_name_t        name;
				ipc_object_t                object;
				mach_msg_port_descriptor_t  *dsc;

				dsc = &saddr->port;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				object = ip_to_object(dsc->name);
				dsc->disposition = ipc_object_copyin_type(name);

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, name);

				/* CDY avoid circularity when the destination is also */
				/* the kernel.  This check should be changed into an  */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats   */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_DESCRIPTOR: {
				/*
				 * The sender should supply ready-made memory, i.e.
				 * a vm_map_copy_t, so we don't need to do anything.
				 */
				break;
			}
			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
				ipc_object_t                        *objects;
				unsigned int                        j;
				mach_msg_type_name_t                name;
				mach_msg_ool_ports_descriptor_t     *dsc;

				dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				dsc->disposition = ipc_object_copyin_type(name);

				/* kernel-supplied array of object pointers, copied in one by one */
				objects = (ipc_object_t *) dsc->address;

				for (j = 0; j < dsc->count; j++) {
					ipc_object_t object = objects[j];

					if (!IO_VALID(object)) {
						continue;
					}

					ipc_object_copyin_from_kernel(object, name);

					if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
				mach_msg_guarded_port_descriptor_t *dsc = (typeof(dsc)) & saddr->guarded_port;
				mach_msg_type_name_t disp = dsc->disposition;
				ipc_object_t object = ip_to_object(dsc->name);
				dsc->disposition = ipc_object_copyin_type(disp);
				/* kernel senders may not send guarded descriptors with flags set */
				assert(dsc->flags == 0);

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, disp);
				/*
				 * avoid circularity when the destination is also
				 * the kernel.  This check should be changed into an
				 * assert when the new kobject model is in place since
				 * ports will not be used in kernel to kernel chats
				 */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			default: {
#if     MACH_ASSERT
				panic("ipc_kmsg_copyin_from_kernel:  bad descriptor");
#endif  /* MACH_ASSERT */
			}
			}
		}
	}

	/* Add the signature to the message */
	ikm_sign(kmsg);

	return MACH_MSG_SUCCESS;
}
4301  
4302  #if IKM_SUPPORT_LEGACY
/*
 * Legacy variant of ipc_kmsg_copyin_from_kernel: the kernel-built message
 * carries 32-bit (legacy) descriptors, which are expanded in place to the
 * kernel's native descriptor layout while the rights are copied in.
 */
mach_msg_return_t
ipc_kmsg_copyin_from_kernel_legacy(
	ipc_kmsg_t      kmsg)
{
	mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
	mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
	mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
	ipc_object_t remote = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t local = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_port_t dest = kmsg->ikm_header->msgh_remote_port;

	/* translate the destination and reply ports */
	if (!IO_VALID(remote)) {
		return MACH_SEND_INVALID_DEST;
	}

	ipc_object_copyin_from_kernel(remote, rname);
	if (IO_VALID(local)) {
		ipc_object_copyin_from_kernel(local, lname);
	}

	/*
	 *	The common case is a complex message with no reply port,
	 *	because that is what the memory_object interface uses.
	 */

	if (bits == (MACH_MSGH_BITS_COMPLEX |
	    MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
		/* fast path: rewrite COPY_SEND to the post-copyin PORT_SEND form */
		bits = (MACH_MSGH_BITS_COMPLEX |
		    MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));

		kmsg->ikm_header->msgh_bits = bits;
	} else {
		bits = (MACH_MSGH_BITS_OTHER(bits) |
		    MACH_MSGH_BITS(ipc_object_copyin_type(rname),
		    ipc_object_copyin_type(lname)));

		kmsg->ikm_header->msgh_bits = bits;
	}

	if (bits & MACH_MSGH_BITS_COMPLEX) {
		/* no-grant destinations may not receive port rights in the body */
		if (dest->ip_no_grant) {
			mach_msg_descriptor_t   *saddr;
			mach_msg_body_t         *body;
			mach_msg_type_number_t  i, count;

			body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
			saddr = (mach_msg_descriptor_t *) (body + 1);
			count = body->msgh_descriptor_count;

			/* scan first so we can bail before any rights are copied in */
			for (i = 0; i < count; i++, saddr++) {
				switch (saddr->type.type) {
				case MACH_MSG_PORT_DESCRIPTOR:
				case MACH_MSG_OOL_PORTS_DESCRIPTOR:
				case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
					/* no descriptors have been copied in yet */
					ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
					return MACH_SEND_NO_GRANT_DEST;
				}
			}
		}

		mach_msg_legacy_descriptor_t    *saddr;
		mach_msg_descriptor_t   *daddr;
		mach_msg_body_t         *body;
		mach_msg_type_number_t  i, count;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		saddr = (typeof(saddr))(body + 1);
		count = body->msgh_descriptor_count;

		if (count) {
			/*
			 * Shift the header down 4 bytes per descriptor to make
			 * room for the larger native descriptors; each legacy
			 * descriptor grows by 4 bytes in the kernel layout.
			 */
			vm_offset_t dsc_adjust = 4 * count;
			memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
			kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);
			/* Update the message size for the larger in-kernel representation */
			kmsg->ikm_header->msgh_size += dsc_adjust;
		}
		/* daddr: native destination descriptors; saddr: legacy source ones */
		daddr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));

		for (i = 0; i < count; i++, saddr++, daddr++) {
			switch (saddr->type.type) {
			case MACH_MSG_PORT_DESCRIPTOR: {
				mach_msg_type_name_t        name;
				ipc_object_t                object;
				mach_msg_legacy_port_descriptor_t   *dsc;
				mach_msg_port_descriptor_t  *dest_dsc;

				dsc = (typeof(dsc)) & saddr->port;
				dest_dsc = &daddr->port;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
				dest_dsc->disposition = ipc_object_copyin_type(name);
				dest_dsc->name = ip_object_to_port(object);
				dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, name);

				/* CDY avoid circularity when the destination is also */
				/* the kernel.  This check should be changed into an  */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats   */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_DESCRIPTOR: {
				/* The sender should supply ready-made memory, i.e. a vm_map_copy_t
				 * so we don't need to do anything special. */

				mach_msg_ool_descriptor32_t *source_dsc = &saddr->out_of_line32;
				mach_msg_ool_descriptor_t   *dest_dsc = (typeof(dest_dsc)) & daddr->out_of_line;

				/* widen the 32-bit fields into the native descriptor */
				vm_offset_t             address = source_dsc->address;
				vm_size_t                           size = source_dsc->size;
				boolean_t                           deallocate = source_dsc->deallocate;
				mach_msg_copy_options_t             copy = source_dsc->copy;
				mach_msg_descriptor_type_t  type = source_dsc->type;

				dest_dsc->address = (void *)address;
				dest_dsc->size = size;
				dest_dsc->deallocate = deallocate;
				dest_dsc->copy = copy;
				dest_dsc->type = type;
				break;
			}
			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
				ipc_object_t                        *objects;
				unsigned int                        j;
				mach_msg_type_name_t                name;
				mach_msg_ool_ports_descriptor_t     *dest_dsc;

				mach_msg_ool_ports_descriptor32_t   *source_dsc = &saddr->ool_ports32;
				dest_dsc = (typeof(dest_dsc)) & daddr->ool_ports;

				boolean_t deallocate = source_dsc->deallocate;
				mach_msg_copy_options_t copy = source_dsc->copy;
				mach_msg_size_t port_count = source_dsc->count;
				mach_msg_type_name_t disposition = source_dsc->disposition;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = disposition;
				disposition = ipc_object_copyin_type(name);

				/* kernel-supplied array of object pointers, copied in one by one */
				objects = (ipc_object_t *) (uintptr_t)source_dsc->address;

				for (j = 0; j < port_count; j++) {
					ipc_object_t object = objects[j];

					if (!IO_VALID(object)) {
						continue;
					}

					ipc_object_copyin_from_kernel(object, name);

					if ((disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}

				dest_dsc->address = objects;
				dest_dsc->deallocate = deallocate;
				dest_dsc->copy = copy;
				dest_dsc->disposition = disposition;
				dest_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
				dest_dsc->count = port_count;
				break;
			}
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
				mach_msg_type_name_t  disp;
				ipc_object_t object;
				mach_msg_guarded_port_descriptor32_t   *dsc;
				mach_msg_guarded_port_descriptor_t  *dest_dsc;

				dsc = (typeof(dsc)) & saddr->guarded_port32;
				dest_dsc = &daddr->guarded_port;

				disp = dsc->disposition;
				object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
				/* kernel senders may not supply guard flags or a context */
				assert(dsc->flags == 0);
				assert(dsc->context == 0);

				dest_dsc->disposition = ipc_object_copyin_type(disp);
				dest_dsc->name = ip_object_to_port(object);
				dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
				dest_dsc->flags = 0;

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, disp);

				/* CDY avoid circularity when the destination is also */
				/* the kernel.  This check should be changed into an  */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats   */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			default: {
#if     MACH_ASSERT
				panic("ipc_kmsg_copyin_from_kernel:  bad descriptor");
#endif  /* MACH_ASSERT */
			}
			}
		}
	}

	/* Add the signature to the message */
	ikm_sign(kmsg);

	return MACH_MSG_SUCCESS;
}
4540  #endif /* IKM_SUPPORT_LEGACY */
4541  
4542  /*
4543   *	Routine:	ipc_kmsg_copyout_header
4544   *	Purpose:
4545   *		"Copy-out" port rights in the header of a message.
4546   *		Operates atomically; if it doesn't succeed the
4547   *		message header and the space are left untouched.
4548   *		If it does succeed the remote/local port fields
4549   *		contain port names instead of object pointers,
4550   *		and the bits field is updated.
4551   *	Conditions:
4552   *		Nothing locked.
4553   *	Returns:
4554   *		MACH_MSG_SUCCESS	Copied out port rights.
4555   *		MACH_RCV_INVALID_NOTIFY
4556   *			Notify is non-null and doesn't name a receive right.
4557   *			(Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
4558   *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
4559   *			The space is dead.
4560   *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
4561   *			No room in space for another name.
4562   *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
4563   *			Couldn't allocate memory for the reply port.
4564   *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
4565   *			Couldn't allocate memory for the dead-name request.
4566   */
4567  
4568  mach_msg_return_t
4569  ipc_kmsg_copyout_header(
4570  	ipc_kmsg_t              kmsg,
4571  	ipc_space_t             space,
4572  	mach_msg_option_t       option)
4573  {
4574  	mach_msg_header_t *msg = kmsg->ikm_header;
4575  	mach_msg_bits_t mbits = msg->msgh_bits;
4576  	ipc_port_t dest = msg->msgh_remote_port;
4577  
4578  	assert(IP_VALID(dest));
4579  
4580  	/*
4581  	 * While we still hold a reference on the received-from port,
	 * process all send-possible notifications we received along with
4583  	 * the message.
4584  	 */
4585  	ipc_port_spnotify(dest);
4586  
4587  	{
4588  		mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
4589  		mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
4590  		mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
4591  		ipc_port_t reply = msg->msgh_local_port;
4592  		ipc_port_t release_reply_port = IP_NULL;
4593  		mach_port_name_t dest_name, reply_name;
4594  
4595  		ipc_port_t voucher = kmsg->ikm_voucher;
4596  		ipc_port_t release_voucher_port = IP_NULL;
4597  		mach_port_name_t voucher_name;
4598  
4599  		uint32_t entries_held = 0;
4600  		boolean_t need_write_lock = FALSE;
4601  		ipc_object_copyout_flags_t reply_copyout_options = IPC_OBJECT_COPYOUT_FLAGS_NONE;
4602  		kern_return_t kr;
4603  
4604  		/*
4605  		 * Reserve any potentially needed entries in the target space.
4606  		 * We'll free any unused before unlocking the space.
4607  		 */
4608  		if (IP_VALID(reply)) {
4609  			entries_held++;
4610  			need_write_lock = TRUE;
4611  		}
4612  		if (IP_VALID(voucher)) {
4613  			assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
4614  
4615  			if ((option & MACH_RCV_VOUCHER) != 0) {
4616  				entries_held++;
4617  			}
4618  			need_write_lock = TRUE;
4619  		}
4620  
4621  		if (need_write_lock) {
4622  handle_reply_again:
4623  			is_write_lock(space);
4624  
4625  			while (entries_held) {
4626  				if (!is_active(space)) {
4627  					is_write_unlock(space);
4628  					return MACH_RCV_HEADER_ERROR |
4629  					       MACH_MSG_IPC_SPACE;
4630  				}
4631  
4632  				kr = ipc_entries_hold(space, entries_held);
4633  				if (KERN_SUCCESS == kr) {
4634  					break;
4635  				}
4636  
4637  				kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
4638  				if (KERN_SUCCESS != kr) {
4639  					return MACH_RCV_HEADER_ERROR |
4640  					       MACH_MSG_IPC_SPACE;
4641  				}
4642  				/* space was unlocked and relocked - retry */
4643  			}
4644  
4645  			/* Handle reply port. */
4646  			if (IP_VALID(reply)) {
4647  				ipc_port_t reply_subst = IP_NULL;
4648  				ipc_entry_t entry;
4649  
4650  				ip_lock(reply);
4651  
4652  				/* Is the reply port still active and allowed to be copied out? */
4653  				if (!ip_active(reply) ||
4654  				    !ip_label_check(space, reply, reply_type,
4655  				    &reply_copyout_options, &reply_subst)) {
4656  					/* clear the context value */
4657  					reply->ip_reply_context = 0;
4658  					ip_unlock(reply);
4659  
4660  					assert(reply_subst == IP_NULL);
4661  					release_reply_port = reply;
4662  					reply = IP_DEAD;
4663  					reply_name = MACH_PORT_DEAD;
4664  					goto done_with_reply;
4665  				}
4666  
4667  				/* is the kolabel requesting a substitution */
4668  				if (reply_subst != IP_NULL) {
4669  					/*
4670  					 * port is unlocked, its right consumed
4671  					 * space is unlocked
4672  					 */
4673  					assert(reply_type == MACH_MSG_TYPE_PORT_SEND);
4674  					msg->msgh_local_port = reply = reply_subst;
4675  					goto handle_reply_again;
4676  				}
4677  
4678  
4679  				/* Is there already an entry we can use? */
4680  				if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
4681  				    ipc_right_reverse(space, ip_to_object(reply), &reply_name, &entry)) {
4682  					assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
4683  				} else {
4684  					/* claim a held entry for the reply port */
4685  					assert(entries_held > 0);
4686  					entries_held--;
4687  					ipc_entry_claim(space, &reply_name, &entry);
4688  					assert(!ipc_right_inuse(entry));
4689  					assert(entry->ie_object == IO_NULL);
4690  					entry->ie_object = ip_to_object(reply);
4691  				}
4692  
4693  				/* space and reply port are locked and active */
4694  				ip_reference(reply);         /* hold onto the reply port */
4695  
4696  				/*
4697  				 * If the receiver would like to enforce strict reply
4698  				 * semantics, and the message looks like it expects a reply,
4699  				 * and contains a voucher, then link the context in the
4700  				 * voucher with the reply port so that the next message sent
4701  				 * to the reply port must come from a thread that has a
4702  				 * matching context (voucher).
4703  				 */
4704  				if (enforce_strict_reply && MACH_RCV_WITH_STRICT_REPLY(option) && IP_VALID(voucher)) {
4705  					if (ipc_kmsg_validate_reply_port_locked(reply, option) != KERN_SUCCESS) {
4706  						/* if the receiver isn't happy with the reply port: fail the receive. */
4707  						ip_unlock(reply);
4708  						ipc_entry_dealloc(space, reply_name, entry);
4709  						is_write_unlock(space);
4710  						ip_release(reply);
4711  						return MACH_RCV_INVALID_REPLY;
4712  					}
4713  					ipc_kmsg_link_reply_context_locked(reply, voucher);
4714  				} else {
4715  					/*
4716  					 * if the receive did not choose to participate
4717  					 * in the strict reply/RPC, then don't enforce
4718  					 * anything (as this could lead to booby-trapped
4719  					 * messages that kill the server).
4720  					 */
4721  					reply->ip_reply_context = 0;
4722  				}
4723  
4724  				kr = ipc_right_copyout(space, reply_name, entry,
4725  				    reply_type, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL,
4726  				    ip_to_object(reply));
4727  				assert(kr == KERN_SUCCESS);
4728  				/* reply port is unlocked */
4729  			} else {
4730  				reply_name = CAST_MACH_PORT_TO_NAME(reply);
4731  			}
4732  
4733  done_with_reply:
4734  
4735  			/* Handle voucher port. */
4736  			if (voucher_type != MACH_MSGH_BITS_ZERO) {
4737  				assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
4738  
4739  				if (!IP_VALID(voucher)) {
4740  					if ((option & MACH_RCV_VOUCHER) == 0) {
4741  						voucher_type = MACH_MSGH_BITS_ZERO;
4742  					}
4743  					voucher_name = MACH_PORT_NULL;
4744  					goto done_with_voucher;
4745  				}
4746  
4747  				/* clear voucher from its hiding place back in the kmsg */
4748  				kmsg->ikm_voucher = IP_NULL;
4749  
4750  				if ((option & MACH_RCV_VOUCHER) != 0) {
4751  					ipc_entry_t entry;
4752  
4753  					ip_lock(voucher);
4754  
4755  					if (ipc_right_reverse(space, ip_to_object(voucher),
4756  					    &voucher_name, &entry)) {
4757  						assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
4758  					} else {
4759  						assert(entries_held > 0);
4760  						entries_held--;
4761  						ipc_entry_claim(space, &voucher_name, &entry);
4762  						assert(!ipc_right_inuse(entry));
4763  						assert(entry->ie_object == IO_NULL);
4764  						entry->ie_object = ip_to_object(voucher);
4765  					}
4766  					/* space is locked and active */
4767  
4768  					assert(ip_kotype(voucher) == IKOT_VOUCHER);
4769  					kr = ipc_right_copyout(space, voucher_name, entry,
4770  					    MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4771  					    NULL, NULL, ip_to_object(voucher));
4772  					/* voucher port is unlocked */
4773  				} else {
4774  					voucher_type = MACH_MSGH_BITS_ZERO;
4775  					release_voucher_port = voucher;
4776  					voucher_name = MACH_PORT_NULL;
4777  				}
4778  			} else {
4779  				voucher_name = msg->msgh_voucher_port;
4780  			}
4781  
4782  done_with_voucher:
4783  
4784  			ip_lock(dest);
4785  			is_write_unlock(space);
4786  		} else {
4787  			/*
4788  			 *	No reply or voucher port!  This is an easy case.
4789  			 *	We only need to have the space locked
4790  			 *	when locking the destination.
4791  			 */
4792  
4793  			is_read_lock(space);
4794  			if (!is_active(space)) {
4795  				is_read_unlock(space);
4796  				return MACH_RCV_HEADER_ERROR | MACH_MSG_IPC_SPACE;
4797  			}
4798  
4799  			ip_lock(dest);
4800  			is_read_unlock(space);
4801  
4802  			reply_name = CAST_MACH_PORT_TO_NAME(reply);
4803  
4804  			if (voucher_type != MACH_MSGH_BITS_ZERO) {
4805  				assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
4806  				if ((option & MACH_RCV_VOUCHER) == 0) {
4807  					voucher_type = MACH_MSGH_BITS_ZERO;
4808  				}
4809  				voucher_name = MACH_PORT_NULL;
4810  			} else {
4811  				voucher_name = msg->msgh_voucher_port;
4812  			}
4813  		}
4814  
4815  		/*
4816  		 *	At this point, the space is unlocked and the destination
4817  		 *	port is locked.  (Lock taken while space was locked.)
4818  		 *	reply_name is taken care of; we still need dest_name.
4819  		 *	We still hold a ref for reply (if it is valid).
4820  		 *
4821  		 *	If the space holds receive rights for the destination,
4822  		 *	we return its name for the right.  Otherwise the task
4823  		 *	managed to destroy or give away the receive right between
4824  		 *	receiving the message and this copyout.  If the destination
4825  		 *	is dead, return MACH_PORT_DEAD, and if the receive right
4826  		 *	exists somewhere else (another space, in transit)
4827  		 *	return MACH_PORT_NULL.
4828  		 *
4829  		 *	Making this copyout operation atomic with the previous
4830  		 *	copyout of the reply port is a bit tricky.  If there was
4831  		 *	no real reply port (it wasn't IP_VALID) then this isn't
4832  		 *	an issue.  If the reply port was dead at copyout time,
4833  		 *	then we are OK, because if dest is dead we serialize
4834  		 *	after the death of both ports and if dest is alive
4835  		 *	we serialize after reply died but before dest's (later) death.
4836  		 *	So assume reply was alive when we copied it out.  If dest
4837  		 *	is alive, then we are OK because we serialize before
4838  		 *	the ports' deaths.  So assume dest is dead when we look at it.
4839  		 *	If reply dies/died after dest, then we are OK because
4840  		 *	we serialize after dest died but before reply dies.
4841  		 *	So the hard case is when reply is alive at copyout,
4842  		 *	dest is dead at copyout, and reply died before dest died.
4843  		 *	In this case pretend that dest is still alive, so
4844  		 *	we serialize while both ports are alive.
4845  		 *
4846  		 *	Because the space lock is held across the copyout of reply
4847  		 *	and locking dest, the receive right for dest can't move
4848  		 *	in or out of the space while the copyouts happen, so
4849  		 *	that isn't an atomicity problem.  In the last hard case
4850  		 *	above, this implies that when dest is dead that the
4851  		 *	space couldn't have had receive rights for dest at
4852  		 *	the time reply was copied-out, so when we pretend
4853  		 *	that dest is still alive, we can return MACH_PORT_NULL.
4854  		 *
4855  		 *	If dest == reply, then we have to make it look like
4856  		 *	either both copyouts happened before the port died,
4857  		 *	or both happened after the port died.  This special
4858  		 *	case works naturally if the timestamp comparison
4859  		 *	is done correctly.
4860  		 */
4861  
4862  		if (ip_active(dest)) {
4863  			ipc_object_copyout_dest(space, ip_to_object(dest),
4864  			    dest_type, &dest_name);
4865  			/* dest is unlocked */
4866  		} else {
4867  			ipc_port_timestamp_t timestamp;
4868  
4869  			timestamp = dest->ip_timestamp;
4870  			ip_unlock(dest);
4871  			ip_release(dest);
4872  
4873  			if (IP_VALID(reply)) {
4874  				ip_lock(reply);
4875  				if (ip_active(reply) ||
4876  				    IP_TIMESTAMP_ORDER(timestamp,
4877  				    reply->ip_timestamp)) {
4878  					dest_name = MACH_PORT_DEAD;
4879  				} else {
4880  					dest_name = MACH_PORT_NULL;
4881  				}
4882  				ip_unlock(reply);
4883  			} else {
4884  				dest_name = MACH_PORT_DEAD;
4885  			}
4886  		}
4887  
4888  		if (IP_VALID(reply)) {
4889  			ip_release(reply);
4890  		}
4891  
4892  		if (IP_VALID(release_reply_port)) {
4893  			if (reply_type == MACH_MSG_TYPE_PORT_SEND_ONCE) {
4894  				ipc_port_release_sonce(release_reply_port);
4895  			} else {
4896  				ipc_port_release_send(release_reply_port);
4897  			}
4898  		}
4899  
4900  		if ((option & MACH_RCV_VOUCHER) != 0) {
4901  			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV) | DBG_FUNC_NONE,
4902  			    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
4903  			    (uintptr_t)kmsg->ikm_header->msgh_bits,
4904  			    (uintptr_t)kmsg->ikm_header->msgh_id,
4905  			    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
4906  			    0);
4907  		} else {
4908  			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV_VOUCHER_REFUSED) | DBG_FUNC_NONE,
4909  			    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
4910  			    (uintptr_t)kmsg->ikm_header->msgh_bits,
4911  			    (uintptr_t)kmsg->ikm_header->msgh_id,
4912  			    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
4913  			    0);
4914  		}
4915  
4916  		if (IP_VALID(release_voucher_port)) {
4917  			ipc_port_release_send(release_voucher_port);
4918  		}
4919  
4920  		msg->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
4921  		    voucher_type, mbits);
4922  		msg->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
4923  		msg->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
4924  		msg->msgh_voucher_port = voucher_name;
4925  	}
4926  
4927  	return MACH_MSG_SUCCESS;
4928  }
4929  
4930  /*
4931   *	Routine:	ipc_kmsg_copyout_object
4932   *	Purpose:
4933   *		Copy-out a port right.  Always returns a name,
4934   *		even for unsuccessful return codes.  Always
4935   *		consumes the supplied object.
4936   *	Conditions:
4937   *		Nothing locked.
4938   *	Returns:
4939   *		MACH_MSG_SUCCESS	The space acquired the right
4940   *			(name is valid) or the object is dead (MACH_PORT_DEAD).
4941   *		MACH_MSG_IPC_SPACE	No room in space for the right,
4942   *			or the space is dead.  (Name is MACH_PORT_NULL.)
4943   *		MACH_MSG_IPC_KERNEL	Kernel resource shortage.
4944   *			(Name is MACH_PORT_NULL.)
4945   */
4946  static mach_msg_return_t
4947  ipc_kmsg_copyout_object(
4948  	ipc_space_t             space,
4949  	ipc_object_t            object,
4950  	mach_msg_type_name_t    msgt_name,
4951  	mach_port_context_t     *context,
4952  	mach_msg_guard_flags_t  *guard_flags,
4953  	mach_port_name_t        *namep)
4954  {
4955  	kern_return_t kr;
4956  
4957  	if (!IO_VALID(object)) {
4958  		*namep = CAST_MACH_PORT_TO_NAME(object);
4959  		return MACH_MSG_SUCCESS;
4960  	}
4961  
4962  	kr = ipc_object_copyout(space, object, msgt_name, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4963  	    context, guard_flags, namep);
4964  	if (kr != KERN_SUCCESS) {
4965  		if (kr == KERN_INVALID_CAPABILITY) {
4966  			*namep = MACH_PORT_DEAD;
4967  		} else {
4968  			*namep = MACH_PORT_NULL;
4969  
4970  			if (kr == KERN_RESOURCE_SHORTAGE) {
4971  				return MACH_MSG_IPC_KERNEL;
4972  			} else {
4973  				return MACH_MSG_IPC_SPACE;
4974  			}
4975  		}
4976  	}
4977  
4978  	return MACH_MSG_SUCCESS;
4979  }
4980  
4981  static mach_msg_descriptor_t *
4982  ipc_kmsg_copyout_port_descriptor(
4983  	mach_msg_descriptor_t   *dsc,
4984  	mach_msg_descriptor_t   *dest_dsc,
4985  	ipc_space_t             space,
4986  	kern_return_t           *mr)
4987  {
4988  	mach_port_t             port;
4989  	mach_port_name_t        name;
4990  	mach_msg_type_name_t    disp;
4991  
4992  	/* Copyout port right carried in the message */
4993  	port = dsc->port.name;
4994  	disp = dsc->port.disposition;
4995  	*mr |= ipc_kmsg_copyout_object(space,
4996  	    ip_to_object(port), disp, NULL, NULL, &name);
4997  
4998  	if (current_task() == kernel_task) {
4999  		mach_msg_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
5000  		user_dsc--;         // point to the start of this port descriptor
5001  		bzero((void *)user_dsc, sizeof(*user_dsc));
5002  		user_dsc->name = CAST_MACH_NAME_TO_PORT(name);
5003  		user_dsc->disposition = disp;
5004  		user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
5005  		dest_dsc = (typeof(dest_dsc))user_dsc;
5006  	} else {
5007  		mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
5008  		user_dsc--;         // point to the start of this port descriptor
5009  		bzero((void *)user_dsc, sizeof(*user_dsc));
5010  		user_dsc->name = CAST_MACH_PORT_TO_NAME(name);
5011  		user_dsc->disposition = disp;
5012  		user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
5013  		dest_dsc = (typeof(dest_dsc))user_dsc;
5014  	}
5015  
5016  	return (mach_msg_descriptor_t *)dest_dsc;
5017  }
5018  
/*
 *	Routine:	ipc_kmsg_copyout_ool_descriptor
 *	Purpose:
 *		Copy the out-of-line memory carried by one OOL
 *		descriptor into the receiver's map, then rewrite the
 *		descriptor in the user-visible layout immediately
 *		below user_dsc (descriptors are re-packed from the end
 *		of the kernel message backwards).
 *	Conditions:
 *		Nothing locked.  On VM failure the copy object is
 *		discarded and an error bit is OR-ed into *mr; the
 *		descriptor is then written with address 0 / size 0.
 *	Returns:
 *		Pointer to the descriptor just written; this becomes
 *		user_dsc for the next (preceding) descriptor.
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_descriptor(
	mach_msg_ool_descriptor_t   *dsc,
	mach_msg_descriptor_t       *user_dsc,
	int                         is_64bit,
	vm_map_t                    map,
	mach_msg_return_t           *mr)
{
	vm_map_copy_t               copy;
	vm_map_address_t            rcv_addr;
	mach_msg_copy_options_t     copy_options;
	vm_map_size_t               size;
	mach_msg_descriptor_type_t  dsc_type;
	boolean_t                   misaligned = FALSE;

	//SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

	/* the kernel stashed the vm_map_copy_t in the descriptor's address */
	copy = (vm_map_copy_t)dsc->address;
	size = (vm_map_size_t)dsc->size;
	copy_options = dsc->copy;
	assert(copy_options != MACH_MSG_KALLOC_COPY_T);
	dsc_type = dsc->type;

	if (copy != VM_MAP_COPY_NULL) {
		kern_return_t kr;

		rcv_addr = 0;
		/* a size mismatch here indicates kernel state corruption */
		if (vm_map_copy_validate_size(map, copy, &size) == FALSE) {
			panic("Inconsistent OOL/copyout size on %p: expected %d, got %lld @%p",
			    dsc, dsc->size, (unsigned long long)copy->size, copy);
		}

		/*
		 * Entry-list copies whose offset or size is not page
		 * aligned cannot go through the plain copyout path.
		 */
		if ((copy->type == VM_MAP_COPY_ENTRY_LIST) &&
		    (trunc_page(copy->offset) != copy->offset ||
		    round_page(dsc->size) != dsc->size)) {
			misaligned = TRUE;
		}

		if (misaligned) {
			vm_map_address_t        rounded_addr;
			vm_map_size_t   rounded_size;
			vm_map_offset_t effective_page_mask, effective_page_size;

			effective_page_mask = VM_MAP_PAGE_MASK(map);
			effective_page_size = effective_page_mask + 1;

			/* allocate the full page-rounded span in the receiver */
			rounded_size = vm_map_round_page(copy->offset + size, effective_page_mask) - vm_map_trunc_page(copy->offset, effective_page_mask);

			kr = vm_allocate_kernel(map, (vm_offset_t*)&rounded_addr, rounded_size, VM_FLAGS_ANYWHERE, 0);

			if (kr == KERN_SUCCESS) {
				/*
				 * vm_map_copy_overwrite does a full copy
				 * if size is too small to optimize.
				 * So we tried skipping the offset adjustment
				 * if we fail the 'size' test.
				 *
				 * if (size >= VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES * effective_page_size) {
				 *
				 * This resulted in leaked memory especially on the
				 * older watches (16k user - 4k kernel) because we
				 * would do a physical copy into the start of this
				 * rounded range but could leak part of it
				 * on deallocation if the 'size' being deallocated
				 * does not cover the full range. So instead we do
				 * the misalignment adjustment always so that on
				 * deallocation we will remove the full range.
				 */
				if ((rounded_addr & effective_page_mask) !=
				    (copy->offset & effective_page_mask)) {
					/*
					 * Need similar mis-alignment of source and destination...
					 */
					rounded_addr += (copy->offset & effective_page_mask);

					assert((rounded_addr & effective_page_mask) == (copy->offset & effective_page_mask));
				}
				rcv_addr = rounded_addr;

				kr = vm_map_copy_overwrite(map, rcv_addr, copy, size, FALSE);
			}
		} else {
			/* aligned: move the copy object straight into the map */
			kr = vm_map_copyout_size(map, &rcv_addr, copy, size);
		}
		if (kr != KERN_SUCCESS) {
			if (kr == KERN_RESOURCE_SHORTAGE) {
				*mr |= MACH_MSG_VM_KERNEL;
			} else {
				*mr |= MACH_MSG_VM_SPACE;
			}
			/* copy object is consumed even on failure */
			vm_map_copy_discard(copy);
			rcv_addr = 0;
			size = 0;
		}
	} else {
		rcv_addr = 0;
		size = 0;
	}

	/*
	 * Now update the descriptor as the user would see it.
	 * This may require expanding the descriptor to the user
	 * visible size.  There is already space allocated for
	 * this in what naddr points to.
	 */
	if (current_task() == kernel_task) {
		/* kernel receiver: pointer-width in-kernel descriptor */
		mach_msg_ool_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;
		user_ool_dsc->size = (mach_msg_size_t)size;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else if (is_64bit) {
		/* 64-bit user receiver */
		mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;
		user_ool_dsc->size = (mach_msg_size_t)size;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else {
		/* 32-bit user receiver: truncate the address field */
		mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
		user_ool_dsc->size = (mach_msg_size_t)size;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	}
	return user_dsc;
}
5166  
/* forward declaration (keeps -Wmissing-prototypes quiet for this extern) */
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr);
/*
 *	Routine:	ipc_kmsg_copyout_ool_ports_descriptor
 *	Purpose:
 *		Copyout an array of port rights carried in an OOL-ports
 *		descriptor: translate each right to a name in the
 *		receiving space, copy the name array out to freshly
 *		allocated user memory, and rewrite the descriptor in
 *		the user-visible layout immediately below user_dsc.
 *	Conditions:
 *		Nothing locked.  Errors are OR-ed into *mr; on
 *		allocation failure the remaining rights are destroyed
 *		via ipc_kmsg_clean_body and the address reported is 0.
 *	Returns:
 *		Pointer to the descriptor just written; this becomes
 *		user_dsc for the next (preceding) descriptor.
 */
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr)
{
	mach_vm_offset_t            rcv_addr = 0;
	mach_msg_type_name_t                disp;
	mach_msg_type_number_t              count, i;
	vm_size_t                           ports_length, names_length;

	mach_msg_copy_options_t copy_options = MACH_MSG_VIRTUAL_COPY;

	//SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

	count = dsc->count;
	disp = dsc->disposition;
	/* kernel buffer holds object pointers; user gets 32-bit names */
	ports_length = count * sizeof(mach_port_t);
	names_length = count * sizeof(mach_port_name_t);

	if (ports_length != 0 && dsc->address != 0) {
		/*
		 * Check to see if there is an overwrite descriptor
		 * specified in the scatter list for this ool data.
		 * The descriptor has already been verified.
		 */
#if 0
		if (saddr != MACH_MSG_DESCRIPTOR_NULL) {
			if (differs) {
				OTHER_OOL_DESCRIPTOR *scatter_dsc;

				scatter_dsc = (OTHER_OOL_DESCRIPTOR *)saddr;
				rcv_addr = (mach_vm_offset_t) scatter_dsc->address;
				copy_options = scatter_dsc->copy;
			} else {
				mach_msg_ool_descriptor_t *scatter_dsc;

				scatter_dsc = &saddr->out_of_line;
				rcv_addr = CAST_USER_ADDR_T(scatter_dsc->address);
				copy_options = scatter_dsc->copy;
			}
			INCREMENT_SCATTER(saddr, sdsc_count, differs);
		}
#endif

		if (copy_options == MACH_MSG_VIRTUAL_COPY) {
			/*
			 * Dynamically allocate the region
			 */
			vm_tag_t tag;
			if (vm_kernel_map_is_kernel(map)) {
				tag = VM_KERN_MEMORY_IPC;
			} else {
				tag = VM_MEMORY_MACH_MSG;
			}

			kern_return_t kr;
			if ((kr = mach_vm_allocate_kernel(map, &rcv_addr,
			    (mach_vm_size_t)names_length,
			    VM_FLAGS_ANYWHERE, tag)) != KERN_SUCCESS) {
				/* destroy the rights still held in this descriptor */
				ipc_kmsg_clean_body(kmsg, 1, (mach_msg_descriptor_t *)dsc);
				rcv_addr = 0;

				if (kr == KERN_RESOURCE_SHORTAGE) {
					*mr |= MACH_MSG_VM_KERNEL;
				} else {
					*mr |= MACH_MSG_VM_SPACE;
				}
			}
		}

		/*
		 * Handle the port rights and copy out the names
		 * for those rights out to user-space.
		 */
		if (rcv_addr != 0) {
			/*
			 * objects and names alias the same kernel buffer:
			 * each name overwrites the object slot it came from
			 * (names are no wider than object pointers).
			 */
			ipc_object_t *objects = (ipc_object_t *) dsc->address;
			mach_port_name_t *names = (mach_port_name_t *) dsc->address;

			/* copyout port rights carried in the message */

			for (i = 0; i < count; i++) {
				ipc_object_t object = objects[i];

				*mr |= ipc_kmsg_copyout_object(space, object,
				    disp, NULL, NULL, &names[i]);
			}

			/* copyout to memory allocated above */
			void *data = dsc->address;
			if (copyoutmap(map, data, rcv_addr, names_length) != KERN_SUCCESS) {
				*mr |= MACH_MSG_VM_SPACE;
			}
			/* free the kernel-side buffer (sized for the pointers) */
			kfree(data, ports_length);
		}
	} else {
		rcv_addr = 0;
	}

	/*
	 * Now update the descriptor based on the information
	 * calculated above.
	 */
	if (current_task() == kernel_task) {
		/* kernel receiver: pointer-width in-kernel descriptor */
		mach_msg_ool_ports_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
		user_ool_dsc->count = count;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else if (is_64bit) {
		/* 64-bit user receiver */
		mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
		user_ool_dsc->count = count;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else {
		/* 32-bit user receiver: truncate the address field */
		mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
		user_ool_dsc->count = count;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	}
	return user_dsc;
}
5326  
5327  static mach_msg_descriptor_t *
5328  ipc_kmsg_copyout_guarded_port_descriptor(
5329  	mach_msg_guarded_port_descriptor_t *dsc,
5330  	mach_msg_descriptor_t *dest_dsc,
5331  	int is_64bit,
5332  	__unused ipc_kmsg_t  kmsg,
5333  	ipc_space_t space,
5334  	mach_msg_option_t option,
5335  	kern_return_t *mr)
5336  {
5337  	mach_port_t                 port;
5338  	mach_port_name_t            name = MACH_PORT_NULL;
5339  	mach_msg_type_name_t        disp;
5340  	mach_msg_guard_flags_t      guard_flags;
5341  	mach_port_context_t         context;
5342  
5343  	/* Copyout port right carried in the message */
5344  	port = dsc->name;
5345  	disp = dsc->disposition;
5346  	guard_flags = dsc->flags;
5347  	context = 0;
5348  
5349  	/* Currently kernel_task doesnt support receiving guarded port descriptors */
5350  	struct knote *kn = current_thread()->ith_knote;
5351  	if ((kn != ITH_KNOTE_PSEUDO) && (((option & MACH_RCV_GUARDED_DESC) == 0) ||
5352  	    (current_task() == kernel_task))) {
5353  #if DEVELOPMENT || DEBUG
5354  		if (current_task() != kernel_task) {
5355  			/*
5356  			 * Simulated crash needed for debugging, notifies the receiver to opt into receiving
5357  			 * guarded descriptors.
5358  			 */
5359  			mach_port_guard_exception(current_thread()->ith_receiver_name, 0, 0, kGUARD_EXC_RCV_GUARDED_DESC);
5360  		}
5361  #endif
5362  		KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_DESTROY_GUARDED_DESC), current_thread()->ith_receiver_name,
5363  		    VM_KERNEL_ADDRPERM(port), disp, guard_flags);
5364  		ipc_object_destroy(ip_to_object(port), disp);
5365  		mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
5366  		user_dsc--;         // point to the start of this port descriptor
5367  		bzero((void *)user_dsc, sizeof(*user_dsc));
5368  		user_dsc->name = name;
5369  		user_dsc->disposition = disp;
5370  		user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
5371  		dest_dsc = (typeof(dest_dsc))user_dsc;
5372  	} else {
5373  		*mr |= ipc_kmsg_copyout_object(space,
5374  		    ip_to_object(port), disp, &context, &guard_flags, &name);
5375  
5376  		if (!is_64bit) {
5377  			mach_msg_guarded_port_descriptor32_t *user_dsc = (typeof(user_dsc))dest_dsc;
5378  			user_dsc--;         // point to the start of this port descriptor
5379  			bzero((void *)user_dsc, sizeof(*user_dsc));
5380  			user_dsc->name = name;
5381  			user_dsc->flags = guard_flags;
5382  			user_dsc->disposition = disp;
5383  			user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
5384  			user_dsc->context = CAST_DOWN_EXPLICIT(uint32_t, context);
5385  			dest_dsc = (typeof(dest_dsc))user_dsc;
5386  		} else {
5387  			mach_msg_guarded_port_descriptor64_t *user_dsc = (typeof(user_dsc))dest_dsc;
5388  			user_dsc--;         // point to the start of this port descriptor
5389  			bzero((void *)user_dsc, sizeof(*user_dsc));
5390  			user_dsc->name = name;
5391  			user_dsc->flags = guard_flags;
5392  			user_dsc->disposition = disp;
5393  			user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
5394  			user_dsc->context = context;
5395  			dest_dsc = (typeof(dest_dsc))user_dsc;
5396  		}
5397  	}
5398  
5399  	return (mach_msg_descriptor_t *)dest_dsc;
5400  }
5401  
5402  
5403  /*
5404   *	Routine:	ipc_kmsg_copyout_body
5405   *	Purpose:
5406   *		"Copy-out" port rights and out-of-line memory
5407   *		in the body of a message.
5408   *
5409   *		The error codes are a combination of special bits.
5410   *		The copyout proceeds despite errors.
5411   *	Conditions:
5412   *		Nothing locked.
5413   *	Returns:
5414   *		MACH_MSG_SUCCESS	Successful copyout.
5415   *		MACH_MSG_IPC_SPACE	No room for port right in name space.
5416   *		MACH_MSG_VM_SPACE	No room for memory in address space.
5417   *		MACH_MSG_IPC_KERNEL	Resource shortage handling port right.
5418   *		MACH_MSG_VM_KERNEL	Resource shortage handling memory.
5419   *		MACH_MSG_INVALID_RT_DESCRIPTOR Descriptor incompatible with RT
5420   */
5421  
mach_msg_return_t
ipc_kmsg_copyout_body(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	vm_map_t                map,
	mach_msg_option_t       option,
	mach_msg_body_t         *slist)
{
	mach_msg_body_t             *body;
	mach_msg_descriptor_t       *kern_dsc, *user_dsc;
	mach_msg_descriptor_t       *saddr;
	mach_msg_type_number_t      dsc_count, sdsc_count;
	int i;
	mach_msg_return_t           mr = MACH_MSG_SUCCESS;
	/* receiver bitness inferred from the destination map's range */
	boolean_t                   is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);

	/* the descriptor array immediately follows the message header */
	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
	dsc_count = body->msgh_descriptor_count;
	kern_dsc = (mach_msg_descriptor_t *) (body + 1);
	/* Point user_dsc just after the end of all the descriptors */
	user_dsc = &kern_dsc[dsc_count];

	/* Do scatter list setup */
	if (slist != MACH_MSG_BODY_NULL) {
		/* scatter lists are no longer supported on this path */
		panic("Scatter lists disabled");
		saddr = (mach_msg_descriptor_t *) (slist + 1);
		sdsc_count = slist->msgh_descriptor_count;
	} else {
		saddr = MACH_MSG_DESCRIPTOR_NULL;
		sdsc_count = 0;
	}

	/*
	 * Now process the descriptors - in reverse order.
	 * Each helper writes the (possibly smaller) user form of its
	 * descriptor just below user_dsc and returns the new tail, so
	 * the user descriptors end up packed against the original end
	 * of the kernel descriptor array.
	 */
	for (i = dsc_count - 1; i >= 0; i--) {
		switch (kern_dsc[i].type.type) {
		case MACH_MSG_PORT_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_port_descriptor(&kern_dsc[i],
			    user_dsc, space, &mr);
			break;
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_ool_descriptor(
				(mach_msg_ool_descriptor_t *)&kern_dsc[i],
				user_dsc, is_task_64bit, map, &mr);
			break;
		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_ool_ports_descriptor(
				(mach_msg_ool_ports_descriptor_t *)&kern_dsc[i],
				user_dsc, is_task_64bit, map, space, kmsg, &mr);
			break;
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_guarded_port_descriptor(
				(mach_msg_guarded_port_descriptor_t *)&kern_dsc[i],
				user_dsc, is_task_64bit, kmsg, space, option, &mr);
			break;
		default: {
			/* descriptor types were validated at copyin time */
			panic("untyped IPC copyout body: invalid message descriptor");
		}
		}
	}

	/*
	 * If the user descriptors shrank, slide the header up against
	 * them and shrink the reported message size to match.
	 */
	if (user_dsc != kern_dsc) {
		vm_offset_t dsc_adjust = (vm_offset_t)user_dsc - (vm_offset_t)kern_dsc;
		memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
		/* Update the message size for the smaller user representation */
		kmsg->ikm_header->msgh_size -= (mach_msg_size_t)dsc_adjust;
	}

	return mr;
}
5493  
5494  /*
5495   *	Routine:	ipc_kmsg_copyout_size
5496   *	Purpose:
5497   *		Compute the size of the message as copied out to the given
5498   *		map. If the destination map's pointers are a different size
5499   *		than the kernel's, we have to allow for expansion/
5500   *		contraction of the descriptors as appropriate.
5501   *	Conditions:
5502   *		Nothing locked.
5503   *	Returns:
5504   *		size of the message as it would be received.
5505   */
5506  
5507  mach_msg_size_t
5508  ipc_kmsg_copyout_size(
5509  	ipc_kmsg_t              kmsg,
5510  	vm_map_t                map)
5511  {
5512  	mach_msg_size_t             send_size;
5513  
5514  	send_size = kmsg->ikm_header->msgh_size;
5515  
5516  	boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
5517  
5518  #if defined(__LP64__)
5519  	send_size -= LEGACY_HEADER_SIZE_DELTA;
5520  #endif
5521  
5522  	if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
5523  		mach_msg_body_t *body;
5524  		mach_msg_descriptor_t *saddr, *eaddr;
5525  
5526  		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
5527  		saddr = (mach_msg_descriptor_t *) (body + 1);
5528  		eaddr = saddr + body->msgh_descriptor_count;
5529  
5530  		for (; saddr < eaddr; saddr++) {
5531  			switch (saddr->type.type) {
5532  			case MACH_MSG_OOL_DESCRIPTOR:
5533  			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
5534  			case MACH_MSG_OOL_PORTS_DESCRIPTOR:
5535  			case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
5536  				if (!is_task_64bit) {
5537  					send_size -= DESC_SIZE_ADJUSTMENT;
5538  				}
5539  				break;
5540  			case MACH_MSG_PORT_DESCRIPTOR:
5541  				send_size -= DESC_SIZE_ADJUSTMENT;
5542  				break;
5543  			default:
5544  				break;
5545  			}
5546  		}
5547  	}
5548  	return send_size;
5549  }
5550  
5551  /*
5552   *	Routine:	ipc_kmsg_copyout
5553   *	Purpose:
5554   *		"Copy-out" port rights and out-of-line memory
5555   *		in the message.
5556   *	Conditions:
5557   *		Nothing locked.
5558   *	Returns:
5559   *		MACH_MSG_SUCCESS	Copied out all rights and memory.
5560   *		MACH_RCV_HEADER_ERROR + special bits
5561   *			Rights and memory in the message are intact.
5562   *		MACH_RCV_BODY_ERROR + special bits
5563   *			The message header was successfully copied out.
5564   *			As much of the body was handled as possible.
5565   */
5566  
5567  mach_msg_return_t
5568  ipc_kmsg_copyout(
5569  	ipc_kmsg_t              kmsg,
5570  	ipc_space_t             space,
5571  	vm_map_t                map,
5572  	mach_msg_body_t         *slist,
5573  	mach_msg_option_t      option)
5574  {
5575  	mach_msg_return_t mr;
5576  
5577  	ikm_validate_sig(kmsg);
5578  
5579  	mr = ipc_kmsg_copyout_header(kmsg, space, option);
5580  	if (mr != MACH_MSG_SUCCESS) {
5581  		return mr;
5582  	}
5583  
5584  	if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
5585  		mr = ipc_kmsg_copyout_body(kmsg, space, map, option, slist);
5586  
5587  		if (mr != MACH_MSG_SUCCESS) {
5588  			mr |= MACH_RCV_BODY_ERROR;
5589  		}
5590  	}
5591  
5592  	return mr;
5593  }
5594  
5595  /*
5596   *	Routine:	ipc_kmsg_copyout_pseudo
5597   *	Purpose:
5598   *		Does a pseudo-copyout of the message.
5599   *		This is like a regular copyout, except
5600   *		that the ports in the header are handled
5601   *		as if they are in the body.  They aren't reversed.
5602   *
5603   *		The error codes are a combination of special bits.
5604   *		The copyout proceeds despite errors.
5605   *	Conditions:
5606   *		Nothing locked.
5607   *	Returns:
5608   *		MACH_MSG_SUCCESS	Successful copyout.
5609   *		MACH_MSG_IPC_SPACE	No room for port right in name space.
5610   *		MACH_MSG_VM_SPACE	No room for memory in address space.
5611   *		MACH_MSG_IPC_KERNEL	Resource shortage handling port right.
5612   *		MACH_MSG_VM_KERNEL	Resource shortage handling memory.
5613   */
5614  
mach_msg_return_t
ipc_kmsg_copyout_pseudo(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	vm_map_t                map,
	mach_msg_body_t         *slist)
{
	/* Snapshot the header fields before they are rewritten below. */
	mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;
	ipc_object_t dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_object_t voucher = ip_to_object(kmsg->ikm_voucher);
	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
	mach_port_name_t voucher_name = kmsg->ikm_header->msgh_voucher_port;
	mach_port_name_t dest_name, reply_name;
	mach_msg_return_t mr;

	/* Set ith_knote to ITH_KNOTE_PSEUDO */
	current_thread()->ith_knote = ITH_KNOTE_PSEUDO;

	ikm_validate_sig(kmsg);

	assert(IO_VALID(dest));

#if 0
	/*
	 * If we did this here, it looks like we wouldn't need the undo logic
	 * at the end of ipc_kmsg_send() in the error cases.  Not sure which
	 * would be more elegant to keep.
	 */
	ipc_importance_clean(kmsg);
#else
	/* just assert it is already clean */
	ipc_importance_assert_clean(kmsg);
#endif

	/*
	 * Copy out both header ports as if they were body rights (not
	 * reversed, per the routine comment above).  Error bits from the
	 * two copyouts are OR-ed together; the copyout proceeds despite
	 * errors.
	 */
	mr = (ipc_kmsg_copyout_object(space, dest, dest_type, NULL, NULL, &dest_name) |
	    ipc_kmsg_copyout_object(space, reply, reply_type, NULL, NULL, &reply_name));

	/* Rewrite the header with user-space names, keeping only user bits. */
	kmsg->ikm_header->msgh_bits = mbits & MACH_MSGH_BITS_USER;
	kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(reply_name);

	if (IO_VALID(voucher)) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

		/* Consume the kmsg's voucher reference; copy out its name. */
		kmsg->ikm_voucher = IP_NULL;
		mr |= ipc_kmsg_copyout_object(space, voucher, voucher_type, NULL, NULL, &voucher_name);
		kmsg->ikm_header->msgh_voucher_port = voucher_name;
	}

	/* Complex messages also get their body descriptors copied out. */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mr |= ipc_kmsg_copyout_body(kmsg, space, map, 0, slist);
	}

	return mr;
}
5673  
5674  /*
5675   *	Routine:	ipc_kmsg_copyout_dest
5676   *	Purpose:
5677   *		Copies out the destination port in the message.
5678   *		Destroys all other rights and memory in the message.
5679   *	Conditions:
5680   *		Nothing locked.
5681   */
5682  
void
ipc_kmsg_copyout_dest(
	ipc_kmsg_t      kmsg,
	ipc_space_t     space)
{
	mach_msg_bits_t mbits;
	ipc_object_t dest;
	ipc_object_t reply;
	ipc_object_t voucher;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_msg_type_name_t voucher_type;
	mach_port_name_t dest_name, reply_name, voucher_name;

	ikm_validate_sig(kmsg);

	/* Snapshot header fields before they are overwritten below. */
	mbits = kmsg->ikm_header->msgh_bits;
	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
	voucher = ip_to_object(kmsg->ikm_voucher);
	voucher_name = kmsg->ikm_header->msgh_voucher_port;
	dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);

	assert(IO_VALID(dest));

	ipc_importance_assert_clean(kmsg);

	/* A dead destination copies out as MACH_PORT_DEAD. */
	io_lock(dest);
	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		io_unlock(dest);
		io_release(dest);
		dest_name = MACH_PORT_DEAD;
	}

	/* The reply right is destroyed, not copied out. */
	if (IO_VALID(reply)) {
		ipc_object_destroy(reply, reply_type);
		reply_name = MACH_PORT_NULL;
	} else {
		reply_name = CAST_MACH_PORT_TO_NAME(reply);
	}

	/* The voucher right (if any) is also destroyed. */
	if (IO_VALID(voucher)) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

		kmsg->ikm_voucher = IP_NULL;
		ipc_object_destroy(voucher, voucher_type);
		voucher_name = MACH_PORT_NULL;
	}

	/*
	 * Rebuild the header as the receiver sees it: dest and reply
	 * swap positions (dest becomes the local port), matching the
	 * dispositions encoded by MACH_MSGH_BITS_SET.
	 */
	kmsg->ikm_header->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
	    voucher_type, mbits);
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
	kmsg->ikm_header->msgh_voucher_port = voucher_name;

	/* Destroy all rights and memory carried in the body descriptors. */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *body;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
		    (mach_msg_descriptor_t *)(body + 1));
	}
}
5751  
5752  /*
5753   *	Routine:	ipc_kmsg_copyout_to_kernel
5754   *	Purpose:
5755   *		Copies out the destination and reply ports in the message.
5756   *		Leaves all other rights and memory in the message alone.
5757   *	Conditions:
5758   *		Nothing locked.
5759   *
5760   *	Derived from ipc_kmsg_copyout_dest.
 *	Used by mach_msg_rpc_from_kernel (which used to use copyout_dest).
5762   *	We really do want to save rights and memory.
5763   */
5764  
void
ipc_kmsg_copyout_to_kernel(
	ipc_kmsg_t      kmsg,
	ipc_space_t     space)
{
	ipc_object_t dest;
	mach_port_t reply;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_port_name_t dest_name;

	ikm_validate_sig(kmsg);

	/* Snapshot header fields; reply is kept as a port pointer, untouched. */
	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = kmsg->ikm_header->msgh_local_port;
	dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
	reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);

	assert(IO_VALID(dest));

	/* A dead destination copies out as MACH_PORT_DEAD. */
	io_lock(dest);
	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		io_unlock(dest);
		io_release(dest);
		dest_name = MACH_PORT_DEAD;
	}

	/*
	 * While MIG kernel users don't receive vouchers, the
	 * msgh_voucher_port field is intended to be round-tripped through the
	 * kernel if there is no voucher disposition set. Here we check for a
	 * non-zero voucher disposition, and consume the voucher send right as
	 * there is no possible way to specify MACH_RCV_VOUCHER semantics.
	 */
	mach_msg_type_name_t voucher_type;
	voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
	if (voucher_type != MACH_MSGH_BITS_ZERO) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
		/*
		 * someone managed to send this kernel routine a message with
		 * a voucher in it. Cleanup the reference in
		 * kmsg->ikm_voucher.
		 */
		if (IP_VALID(kmsg->ikm_voucher)) {
			ipc_port_release_send(kmsg->ikm_voucher);
		}
		kmsg->ikm_voucher = IP_NULL;
		kmsg->ikm_header->msgh_voucher_port = 0;
	}

	/* Rebuild the header: dest and reply swap positions, as on receive. */
	kmsg->ikm_header->msgh_bits =
	    (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
	    MACH_MSGH_BITS(reply_type, dest_type));
	kmsg->ikm_header->msgh_local_port =  CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = reply;
}
5824  
5825  #if IKM_SUPPORT_LEGACY
5826  void
5827  ipc_kmsg_copyout_to_kernel_legacy(
5828  	ipc_kmsg_t      kmsg,
5829  	ipc_space_t     space)
5830  {
5831  	ipc_object_t dest;
5832  	mach_port_t  reply;
5833  	mach_msg_type_name_t dest_type;
5834  	mach_msg_type_name_t reply_type;
5835  	mach_port_name_t dest_name;
5836  
5837  	ikm_validate_sig(kmsg);
5838  
5839  	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
5840  	reply = kmsg->ikm_header->msgh_local_port;
5841  	dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
5842  	reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);
5843  
5844  	assert(IO_VALID(dest));
5845  
5846  	io_lock(dest);
5847  	if (io_active(dest)) {
5848  		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
5849  		/* dest is unlocked */
5850  	} else {
5851  		io_unlock(dest);
5852  		io_release(dest);
5853  		dest_name = MACH_PORT_DEAD;
5854  	}
5855  
5856  	mach_msg_type_name_t voucher_type;
5857  	voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
5858  	if (voucher_type != MACH_MSGH_BITS_ZERO) {
5859  		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
5860  		assert(IP_VALID(kmsg->ikm_voucher));
5861  		/*
5862  		 * someone managed to send this kernel routine a message with
5863  		 * a voucher in it. Cleanup the reference in
5864  		 * kmsg->ikm_voucher.
5865  		 */
5866  		ipc_port_release_send(kmsg->ikm_voucher);
5867  		kmsg->ikm_voucher = IP_NULL;
5868  		kmsg->ikm_header->msgh_voucher_port = 0;
5869  	}
5870  
5871  	kmsg->ikm_header->msgh_bits =
5872  	    (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
5873  	    MACH_MSGH_BITS(reply_type, dest_type));
5874  	kmsg->ikm_header->msgh_local_port =  CAST_MACH_NAME_TO_PORT(dest_name);
5875  	kmsg->ikm_header->msgh_remote_port = reply;
5876  
5877  	mach_msg_descriptor_t *saddr;
5878  	mach_msg_legacy_descriptor_t *daddr;
5879  	mach_msg_type_number_t i, count = ((mach_msg_base_t *)kmsg->ikm_header)->body.msgh_descriptor_count;
5880  	saddr = (mach_msg_descriptor_t *) (((mach_msg_base_t *)kmsg->ikm_header) + 1);
5881  	saddr = &saddr[count - 1];
5882  	daddr = (mach_msg_legacy_descriptor_t *)&saddr[count];
5883  	daddr--;
5884  
5885  	vm_offset_t dsc_adjust = 0;
5886  
5887  	for (i = 0; i < count; i++, saddr--, daddr--) {
5888  		switch (saddr->type.type) {
5889  		case MACH_MSG_PORT_DESCRIPTOR: {
5890  			mach_msg_port_descriptor_t *dsc = &saddr->port;
5891  			mach_msg_legacy_port_descriptor_t *dest_dsc = &daddr->port;
5892  
5893  			mach_port_t name = dsc->name;
5894  			mach_msg_type_name_t disposition = dsc->disposition;
5895  
5896  			dest_dsc->name = CAST_MACH_PORT_TO_NAME(name);
5897  			dest_dsc->disposition = disposition;
5898  			dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
5899  			break;
5900  		}
5901  		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
5902  		case MACH_MSG_OOL_DESCRIPTOR: {
5903  			/* The sender should supply ready-made memory, i.e. a vm_map_copy_t
5904  			 * so we don't need to do anything special. */
5905  
5906  			mach_msg_ool_descriptor_t       *source_dsc = (typeof(source_dsc)) & saddr->out_of_line;
5907  
5908  			mach_msg_ool_descriptor32_t *dest_dsc = &daddr->out_of_line32;
5909  
5910  			vm_offset_t                         address = (vm_offset_t)source_dsc->address;
5911  			vm_size_t                       size = source_dsc->size;
5912  			boolean_t                       deallocate = source_dsc->deallocate;
5913  			mach_msg_copy_options_t         copy = source_dsc->copy;
5914  			mach_msg_descriptor_type_t  type = source_dsc->type;
5915  
5916  			dest_dsc->address = address;
5917  			dest_dsc->size = size;
5918  			dest_dsc->deallocate = deallocate;
5919  			dest_dsc->copy = copy;
5920  			dest_dsc->type = type;
5921  			break;
5922  		}
5923  		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
5924  			mach_msg_ool_ports_descriptor_t         *source_dsc = (typeof(source_dsc)) & saddr->ool_ports;
5925  
5926  			mach_msg_ool_ports_descriptor32_t   *dest_dsc = &daddr->ool_ports32;
5927  
5928  			vm_offset_t                         address = (vm_offset_t)source_dsc->address;
5929  			vm_size_t                       port_count = source_dsc->count;
5930  			boolean_t                       deallocate = source_dsc->deallocate;
5931  			mach_msg_copy_options_t         copy = source_dsc->copy;
5932  			mach_msg_descriptor_type_t  type = source_dsc->type;
5933  
5934  			dest_dsc->address = address;
5935  			dest_dsc->count = port_count;
5936  			dest_dsc->deallocate = deallocate;
5937  			dest_dsc->copy = copy;
5938  			dest_dsc->type = type;
5939  			break;
5940  		}
5941  		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
5942  			mach_msg_guarded_port_descriptor_t *source_dsc = (typeof(source_dsc)) & saddr->guarded_port;
5943  			mach_msg_guarded_port_descriptor32_t *dest_dsc = &daddr->guarded_port32;
5944  
5945  			dest_dsc->name = CAST_MACH_PORT_TO_NAME(source_dsc->name);
5946  			dest_dsc->disposition = source_dsc->disposition;
5947  			dest_dsc->flags = 0;
5948  			dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
5949  			dest_dsc->context = 0;
5950  			break;
5951  		}
5952  		default: {
5953  #if     MACH_ASSERT
5954  			panic("ipc_kmsg_copyout_to_kernel_legacy: bad descriptor");
5955  #endif  /* MACH_ASSERT */
5956  		}
5957  		}
5958  	}
5959  
5960  	if (count) {
5961  		dsc_adjust = 4 * count;
5962  		memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
5963  		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
5964  		/* Update the message size for the smaller user representation */
5965  		kmsg->ikm_header->msgh_size -= dsc_adjust;
5966  	}
5967  }
5968  #endif /* IKM_SUPPORT_LEGACY */
5969  
5970  #ifdef __arm64__
5971  /*
5972   * Just sets those parts of the trailer that aren't set up at allocation time.
5973   */
5974  static void
5975  ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit)
5976  {
5977  	if (is64bit) {
5978  		mach_msg_max_trailer64_t *out = (mach_msg_max_trailer64_t*)_out;
5979  		out->msgh_seqno = in->msgh_seqno;
5980  		out->msgh_context = in->msgh_context;
5981  		out->msgh_trailer_size = in->msgh_trailer_size;
5982  		out->msgh_ad = in->msgh_ad;
5983  	} else {
5984  		mach_msg_max_trailer32_t *out = (mach_msg_max_trailer32_t*)_out;
5985  		out->msgh_seqno = in->msgh_seqno;
5986  		out->msgh_context = (mach_port_context32_t)in->msgh_context;
5987  		out->msgh_trailer_size = in->msgh_trailer_size;
5988  		out->msgh_ad = in->msgh_ad;
5989  	}
5990  }
5991  #endif /* __arm64__ */
5992  
5993  mach_msg_trailer_size_t
5994  ipc_kmsg_trailer_size(
5995  	mach_msg_option_t option,
5996  	__unused thread_t thread)
5997  {
5998  	if (!(option & MACH_RCV_TRAILER_MASK)) {
5999  		return MACH_MSG_TRAILER_MINIMUM_SIZE;
6000  	} else {
6001  		return REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);
6002  	}
6003  }
6004  
/*
 * Fill in the receive-time trailer fields (seqno, context, size, and any
 * requested extended elements) at the end of the message, in the layout
 * matching the receiver's address space.
 */
void
ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused,
    mach_msg_option_t option, __unused thread_t thread,
    mach_port_seqno_t seqno, boolean_t minimal_trailer,
    mach_vm_offset_t context)
{
	mach_msg_max_trailer_t *trailer;

#ifdef __arm64__
	mach_msg_max_trailer_t tmp_trailer; /* This accommodates U64, and we'll munge */
	/* Location of the trailer in the message buffer, after the rounded body. */
	void *real_trailer_out = (void*)(mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));

	/*
	 * Populate scratch with initial values set up at message allocation time.
	 * After, we reinterpret the space in the message as the right type
	 * of trailer for the address space in question.
	 */
	bcopy(real_trailer_out, &tmp_trailer, MAX_TRAILER_SIZE);
	trailer = &tmp_trailer;
#else /* __arm64__ */
	(void)thread;
	/* Operate on the trailer in place; no per-address-space munging needed. */
	trailer = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));
#endif /* __arm64__ */

	/* No trailer elements requested: leave the allocation-time trailer as is. */
	if (!(option & MACH_RCV_TRAILER_MASK)) {
		return;
	}

	trailer->msgh_seqno = seqno;
	trailer->msgh_context = context;
	trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);

	if (minimal_trailer) {
		goto done;
	}

	/* The AV element carries the message filter policy id. */
	if (GET_RCV_ELEMENTS(option) >= MACH_RCV_TRAILER_AV) {
		trailer->msgh_ad = kmsg->ikm_filter_policy_id;
	}

	/*
	 * The ipc_kmsg_t holds a reference to the label of a label
	 * handle, not the port. We must get a reference to the port
	 * and a send right to copyout to the receiver.
	 */

	if (option & MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_LABELS)) {
		trailer->msgh_labels.sender = 0;
	}

done:
#ifdef __arm64__
	/* Write the scratch trailer back in the receiver's layout. */
	ipc_kmsg_munge_trailer(trailer, real_trailer_out, thread_is_64bit_addr(thread));
#endif /* __arm64__ */
	return;
}
6065  
6066  mach_msg_header_t *
6067  ipc_kmsg_msg_header(ipc_kmsg_t kmsg)
6068  {
6069  	if (NULL == kmsg) {
6070  		return NULL;
6071  	}
6072  	return kmsg->ikm_header;
6073  }